// Copyright (C) 2018-2022 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "op_table.hpp"
#include "openvino/op/util/attr_types.hpp"
#include "openvino/opsets/opset8.hpp"
#include "utils.hpp"

using namespace std;
using namespace ov::opset8;

namespace ov {
namespace frontend {
namespace tensorflow {
namespace op {

OutputVector translate_matrix_diag_op(const NodeContext& node) {
    // The translation of MatrixDiag to the OpenVINO opset relies on padding the input tensor with zeros,
    // reshaping it to a special form, and cutting off the unneeded padding part.
    // The basic idea is illustrated by an example:
    // given a tensor [1, 2, 3], generate a padding tensor of zeros with a shape [3, 3].
    // Concatenate the input tensor with the padding to get the following:
    // [[1, 0, 0, 0]
    //  [2, 0, 0, 0]
    //  [3, 0, 0, 0]] of shape [3, 4]
    // Reshape it to a tensor of shape [12] equal to [1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0].
    // Cut off the last 3 elements to get [1, 0, 0, 0, 2, 0, 0, 0, 3] and reshape to [3, 3].
    // The same idea generalizes to tensors of higher rank.
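    // Shape flow for an input of shape [I, J, ..., M, N]:
    //   unsqueeze                 -> [1, I, J, ..., M, N, 1]
    //   concatenate zero padding  -> [1, I, J, ..., M, N, N + 1]
    //   reshape                   -> [1, I, J, ..., M, N * N + N]
    //   slice off trailing zeros  -> [1, I, J, ..., M, N * N]
    //   reshape                   -> [I, J, ..., M, N, N]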
    TENSORFLOW_OP_VALIDATION(node, node.get_input_size() > 0, "MatrixDiag must have at least one input.");
    // diagonal is the single input to the MatrixDiag operation and has a shape [I, J, ..., M, N]
    auto diagonal = node.get_input(0);
    auto diagonal_type = diagonal.get_element_type();

    // 1. unsqueeze to get an input of rank at least three with a shape [1, I, J, ..., M, N, 1],
    // because the dimensions [I, J, ..., M] can be absent
    auto unsqueeze_axis = make_shared<Constant>(element::i64, Shape{2}, std::vector<int64_t>{0, -1});
    auto unsqueeze_diag = make_shared<Unsqueeze>(diagonal, unsqueeze_axis);

    // 2. compute the size of the last dimension of the diagonal input of shape [I, J, ..., M, N],
    // i.e. N, the dimension to be diagonalized
    auto unsqueeze_diag_shape = make_shared<ShapeOf>(unsqueeze_diag);
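    // the zero begin_mask and end_mask passed to StridedSlice below mean the begin and end values
    // are used as-is, so the slice extracts exactly the shape element at position -2, i.e. N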
    auto last_dim = make_shared<StridedSlice>(unsqueeze_diag_shape,
                                              make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{-2}),
                                              make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{-1}),
                                              make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{1}),
                                              std::vector<int64_t>({0}),
                                              std::vector<int64_t>({0}));

    // 3. generate a tensor of zeros of shape [1, I, J, ..., M, N, N]
    auto diag_shape = make_shared<ShapeOf>(diagonal);
    auto one_dim = make_shared<Constant>(last_dim->get_element_type(), Shape{1}, std::vector<int64_t>{1});
    auto padding_shape = make_shared<Concat>(OutputVector({one_dim, diag_shape, last_dim}), 0);
    auto padding =
        make_shared<Broadcast>(make_shared<Constant>(diagonal_type, Shape{1}, std::vector<int64_t>{0}), padding_shape);

    // 4. concatenate to get the input tensor with zero padding, of shape [1, I, J, ..., M, N, N + 1]
    auto zero_padded_diag = make_shared<Concat>(OutputVector({unsqueeze_diag, padding}), -1);

    // reshape the padded tensor to get a shape [1, I, J, ..., M, N * N + N]
    // 4.1 retrieve the part of the shape value [1, I, J, ..., M]
    auto new_shape_padded_diag1 =
        make_shared<StridedSlice>(unsqueeze_diag_shape,
                                  make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{0}),
                                  make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{-2}),
                                  make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{1}),
                                  std::vector<int64_t>({0}),
                                  std::vector<int64_t>({0}));
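    // the reshape below merges the last two dimensions [N, N + 1] of the padded tensor into a single
    // dimension of size N * (N + 1) = N * N + N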
    // 4.2 compute the last part of the shape, which is [N * N + N]
    auto last_dim_squared = make_shared<Multiply>(last_dim, last_dim);
    auto new_shape_padded_diag2 = make_shared<Add>(last_dim_squared, last_dim);
    // 4.3 compute the new shape and reshape the padded diagonal
    auto new_shape_padded_diag = make_shared<Concat>(OutputVector({new_shape_padded_diag1, new_shape_padded_diag2}), 0);
    auto reshaped_padded_diag = make_shared<Reshape>(zero_padded_diag, new_shape_padded_diag, false);

    // 5. cut off the padding in the reshaped padded tensor to get a shape [1, I, J, ..., M, N * N]
    auto cut_padded_diag = make_shared<Slice>(
        reshaped_padded_diag,
        make_shared<Constant>(last_dim_squared->get_element_type(), Shape{1}, std::vector<int64_t>{0}),
        last_dim_squared,
        make_shared<Constant>(last_dim_squared->get_element_type(), Shape{1}, std::vector<int64_t>{1}),
        make_shared<Constant>(element::i64, Shape{1}, std::vector<int64_t>{-1}));

    // 6. reshape to the expected result shape [I, J, ..., M, N, N]
    auto resulted_shape = make_shared<Concat>(OutputVector({diag_shape, last_dim}), 0);
    auto resulted_diag = make_shared<Reshape>(cut_padded_diag, resulted_shape, false);

    set_node_name(node.get_name(), resulted_diag);
    return {resulted_diag};
}
} // namespace op
} // namespace tensorflow
} // namespace frontend
} // namespace ov
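
For reference, the pad-reshape-cut trick that the translator builds as an OpenVINO subgraph can be replayed with a small standalone program. The sketch below is illustrative only (plain C++ over a std::vector, not part of the frontend code) and reproduces the [1, 2, 3] example from the comments:

#include <cstddef>
#include <iostream>
#include <vector>

int main() {
    // Diagonal values from the example above.
    std::vector<int> diag = {1, 2, 3};
    const std::size_t n = diag.size();

    // Append n zeros after every diagonal value: the rows of the padded [n, n + 1] tensor, flattened.
    std::vector<int> padded;
    for (int value : diag) {
        padded.push_back(value);
        padded.insert(padded.end(), n, 0);
    }

    // The flattened padded tensor has n * n + n elements; keep only the first n * n.
    padded.resize(n * n);

    // Reading the remaining buffer as an [n, n] matrix yields the diagonal matrix.
    for (std::size_t i = 0; i < n; ++i) {
        for (std::size_t j = 0; j < n; ++j) {
            std::cout << padded[i * n + j] << (j + 1 < n ? " " : "\n");
        }
    }
    return 0;
}

Compiled and run, it prints a 3x3 matrix with 1, 2, 3 on the main diagonal and zeros elsewhere, which is the result MatrixDiag is expected to produce for this input.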