@@ -52,6 +52,12 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) {
52
52
OPENVINO_THROW (" Cannot create OpenVINO batch norm with unsupported number of inputs" );
53
53
}
54
54
} // namespace set_1
55
+ /*
56
+ Opset 6 is skipped because there is no significant difference between opset 1 and opset 6.
57
+ The only difference found is:
58
+ 1. In Training, the computation of ReduceMean and ReduceVar uses float
59
+ to avoid overflow for float16 inputs.
60
+ */
55
61
56
62
namespace set_7 {
57
63
// This version supports ONNX BatchNormalization-7 and BatchNormalization-9
@@ -71,8 +77,42 @@ ov::OutputVector batch_norm(const ov::frontend::onnx::Node& node) {
71
77
72
78
return {std::make_shared<v5::BatchNormInference>(x, scale, bias, mean, var, epsilon)};
73
79
}
74
-
75
80
} // namespace set_7
81
+ /*
82
+ Opset 9 is skipped because there is no significant difference between opset 7 and opset 9.
83
+ The only difference found is:
84
+ 1. removed -> spatial : int (default is 1)
85
+ If true, compute the mean and variance across per activation. If false, compute the mean and variance across
86
+ per feature over each mini-batch.
87
+
88
+ */
89
+
90
+ namespace set_14 {
91
+ // This version supports ONNX BatchNormalization-14 BatchNormalization-15
92
+ ov::OutputVector batch_norm (const ov::frontend::onnx::Node& node) {
93
+ ov::OutputVector inputs{node.get_ov_inputs ()};
94
+ auto x = inputs.at (0 );
95
+ auto scale = inputs.at (1 );
96
+ auto bias = inputs.at (2 );
97
+ auto mean = inputs.at (3 );
98
+ auto var = inputs.at (4 );
99
+
100
+ double epsilon{node.get_attribute_value <double >(" epsilon" , 1e-5 )};
101
+ int64_t training_mode{node.get_attribute_value <int64_t >(" training_mode" , 0 )};
102
+
103
+ CHECK_VALID_NODE (node,
104
+ training_mode == false && node.get_outputs_size () == 1 ,
105
+ " Training mode of BatchNormalization is not supported." );
106
+ return {std::make_shared<v5::BatchNormInference>(x, scale, bias, mean, var, epsilon)};
107
+ }
108
+ } // namespace set_14
109
+ /*
110
+ Opset 15 is skipped because there is no significant difference between opset 14 and opset 15.
111
+ The only difference found is:
112
+ 1. In Training, the computation of ReduceMean and ReduceVar uses float
113
+ to avoid overflow for float16 inputs.
114
+ */
115
+
76
116
} // namespace op
77
117
} // namespace onnx
78
118
} // namespace frontend
0 commit comments