|
| 1 | +// Copyright (C) 2018-2024 Intel Corporation |
| 2 | +// SPDX-License-Identifier: Apache-2.0 |
| 3 | +// |
| 4 | + |
#include <memory>

#include "common_op_table.hpp"
#include "openvino/core/shape.hpp"
#include "openvino/op/broadcast.hpp"
#include "openvino/op/constant.hpp"
#include "openvino/op/convert.hpp"
#include "openvino/op/equal.hpp"
#include "openvino/op/less.hpp"
#include "openvino/op/multiply.hpp"
#include "openvino/op/range.hpp"
#include "openvino/op/reduce_sum.hpp"
#include "openvino/op/reshape.hpp"
#include "openvino/op/select.hpp"
#include "openvino/op/shape_of.hpp"
#include "openvino/op/unsqueeze.hpp"
| 19 | + |
| 20 | +using namespace std; |
| 21 | +using namespace ov; |
| 22 | +using namespace ov::op; |
| 23 | + |
| 24 | +namespace ov { |
| 25 | +namespace frontend { |
| 26 | +namespace tensorflow { |
| 27 | +namespace op { |
| 28 | + |
| 29 | +OutputVector translate_bincount_op(const NodeContext& node) { |
| 30 | + default_op_checks(node, 3, {"Bincount"}); |
| 31 | + auto arr = node.get_input(0); |
| 32 | + auto size = node.get_input(1); |
| 33 | + auto weights = node.get_input(2); |
| 34 | + |
| 35 | + auto scalar_shape = make_shared<v0::Constant>(element::i32, ov::Shape{0}, std::vector<int32_t>{}); |
| 36 | + size = make_shared<v1::Reshape>(size, scalar_shape, false); |
| 37 | + |
| 38 | + auto weights_type = weights.get_element_type(); |
| 39 | + |
| 40 | + if (weights.get_partial_shape() == ov::Shape{0}) { |
| 41 | + auto arr_shape = make_shared<v3::ShapeOf>(arr, element::i32); |
| 42 | + weights = make_shared<v0::Constant>(weights_type, Shape{}, std::vector<int>{1}); |
| 43 | + weights = make_shared<v3::Broadcast>(weights, arr_shape); |
| 44 | + } |
| 45 | + |
| 46 | + // implementation |
| 47 | + auto start = make_shared<v0::Constant>(element::i32, Shape{}, std::vector<int>{0}); |
| 48 | + auto step = make_shared<v0::Constant>(element::i32, Shape{}, std::vector<int>{1}); |
| 49 | + auto range = make_shared<v4::Range>(start, size, step, element::i32); |
| 50 | + |
| 51 | + // Reshape arr and weights to 1D tensors |
| 52 | + auto const_flatten_shape = make_shared<v0::Constant>(element::i32, Shape{1}, std::vector<int32_t>{-1}); |
| 53 | + auto arr_reshaped = make_shared<v1::Reshape>(arr, const_flatten_shape, false); |
| 54 | + auto weights_reshaped = make_shared<v1::Reshape>(weights, const_flatten_shape, false); |
| 55 | + |
| 56 | + // Unsqueeze range to [size, 1] shape and unsqueeze arr and weights to shapes [1, num] |
| 57 | + auto const_axis_zero = make_shared<v0::Constant>(element::i32, Shape{1}, vector<int>({0})); |
| 58 | + auto const_axis_one = make_shared<v0::Constant>(element::i32, Shape{1}, vector<int>({1})); |
| 59 | + auto unsqueeze_range = make_shared<v0::Unsqueeze>(range, const_axis_one); |
| 60 | + auto unsqueeze_arr = make_shared<v0::Unsqueeze>(arr_reshaped, const_axis_zero); |
| 61 | + auto unsqueeze_weights = make_shared<v0::Unsqueeze>(weights_reshaped, const_axis_zero); |
| 62 | + |
| 63 | + // Generate a mask [size, num] on range == arr |
| 64 | + auto mask = make_shared<v1::Equal>(unsqueeze_range, unsqueeze_arr); |
| 65 | + // Compute the weighted mask |
| 66 | + auto mask_casted = make_shared<v0::Convert>(mask, weights_type); |
| 67 | + |
| 68 | + auto to_sum = make_shared<v1::Multiply>(mask_casted, unsqueeze_weights); |
| 69 | + auto reduce_axis = make_shared<v0::Constant>(element::i32, Shape{}, 1); |
| 70 | + auto result = make_shared<v1::ReduceSum>(to_sum, reduce_axis); |
| 71 | + |
| 72 | + set_node_name(node.get_name(), result); |
| 73 | + |
| 74 | + return {result}; |
| 75 | +} |
| 76 | +} // namespace op |
| 77 | +} // namespace tensorflow |
| 78 | +} // namespace frontend |
| 79 | +} // namespace ov |
0 commit comments