
Commit 7dbf702

added tests for tensor functions
1 parent 605aac1 commit 7dbf702

File tree: 5 files changed, +214 -9 lines

nncf/experimental/tensor/functions/__init__.py

+1 -0
@@ -9,6 +9,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

+from nncf.experimental.tensor.functions import linalg as linalg
 from nncf.experimental.tensor.functions.numeric import abs as abs
 from nncf.experimental.tensor.functions.numeric import all as all
 from nncf.experimental.tensor.functions.numeric import allclose as allclose
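With `linalg` re-exported here, the norm helpers become reachable through the same `fns` namespace the new tests use. A minimal sketch of the intended call pattern, assuming a NumPy-backed `Tensor` (the exact import paths for `Tensor` and the `fns` alias are assumptions, not shown in this diff):

import numpy as np

from nncf.experimental.tensor import Tensor  # assumed re-export location
from nncf.experimental.tensor import functions as fns

x = Tensor(np.array([[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]]))

# Frobenius norm of the whole matrix, dispatched to the NumPy backend.
frob = fns.linalg.norm(x, "fro")
print(frob.data)  # ~1.109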

nncf/experimental/tensor/functions/numeric.py

+6 -4
@@ -478,7 +478,7 @@ def sum(a: Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims:
         with size one.
     :return: Returns the sum of all elements in the input tensor in the given axis.
     """
-    return sum(a.data, axis, keepdims)
+    return Tensor(sum(a.data, axis, keepdims))


 @functools.singledispatch
@@ -496,7 +496,7 @@ def multiply(x1: Tensor, x2: Union[Tensor, float]) -> Tensor:


 @functools.singledispatch
 @tensor_guard
-def var(a: Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> Tensor:
+def var(a: Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ddof: int = 0) -> Tensor:
     """
     Compute the variance along the specified axis.

@@ -505,9 +505,11 @@ def var(a: Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims:
         of the flattened tensor.
     :param keepdims: If this is set to True, the axes which are reduced are left in the result as dimensions
         with size one.
-    :return: eturns a new tensor containing the variance;
+    :param ddof: “Delta Degrees of Freedom”: difference between the sample size and sample degrees of freedom.
+        By default ddof is zero.
+    :return: A new tensor containing the variance.
     """
-    return Tensor(var(a.data, axis, keepdims))
+    return Tensor(var(a.data, axis, keepdims, ddof))


 @functools.singledispatch
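The new `ddof` argument follows the usual population/sample variance convention: the sum of squared deviations is divided by N - ddof. A small worked check with plain NumPy (illustrative sketch only, not part of the change):

import numpy as np

a = np.array([0.8, 0.1])  # one column of the matrix used in the tests below
n = a.size

pop = np.sum((a - a.mean()) ** 2) / n           # ddof=0: divide by N     -> 0.1225
sample = np.sum((a - a.mean()) ** 2) / (n - 1)  # ddof=1: divide by N - 1 -> 0.245

assert np.isclose(pop, np.var(a, ddof=0))
assert np.isclose(sample, np.var(a, ddof=1))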

nncf/experimental/tensor/functions/numpy_numeric.py

+6 -3
@@ -235,10 +235,13 @@ def _(x1: Union[np.ndarray, np.generic], x2: Union[np.ndarray, np.generic, float


 @register_numpy_types(numeric.var)
-def var(
-    a: Union[np.ndarray, np.generic], axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False
+def _(
+    a: Union[np.ndarray, np.generic],
+    axis: Optional[Union[int, Tuple[int, ...]]] = None,
+    keepdims: bool = False,
+    ddof: int = 0,
 ) -> np.ndarray:
-    return np.array(np.var(a, axis=axis, keepdims=keepdims))
+    return np.array(np.var(a, axis=axis, keepdims=keepdims, ddof=ddof))


 @register_numpy_types(numeric.size)
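Renaming the backend implementation to `_` matches the other registered overloads: it is never called by name, only through `numeric.var`'s single-dispatch table. A minimal standalone sketch of that registration pattern (generic names, not NNCF code):

import functools

import numpy as np


@functools.singledispatch
def var(a, axis=None, keepdims=False, ddof=0):
    raise NotImplementedError(f"var is not implemented for {type(a)}")


@var.register(np.ndarray)
def _(a, axis=None, keepdims=False, ddof=0):
    # NumPy backend: arguments are forwarded keyword-for-keyword.
    return np.var(a, axis=axis, keepdims=keepdims, ddof=ddof)


print(var(np.array([0.8, 0.1]), ddof=1))  # ~0.245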

nncf/experimental/tensor/functions/torch_numeric.py

+4 -2
@@ -233,8 +233,10 @@ def _(x1: torch.Tensor, x2: Union[torch.Tensor, float]) -> torch.Tensor:


 @numeric.var.register(torch.Tensor)
-def var(a: torch.Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False) -> torch.Tensor:
-    return torch.var(a, dim=axis, keepdim=keepdims)
+def _(
+    a: torch.Tensor, axis: Optional[Union[int, Tuple[int, ...]]] = None, keepdims: bool = False, ddof: int = 0
+) -> torch.Tensor:
+    return torch.var(a, dim=axis, keepdim=keepdims, correction=ddof)


 @numeric.size.register(torch.Tensor)
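In the Torch backend, `ddof` maps onto `torch.var`'s `correction` argument, which plays the same role (divide by N - correction); this assumes a PyTorch version that accepts `correction` rather than only the older boolean `unbiased` flag. A quick equivalence check, as a sketch:

import numpy as np
import torch

x = [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]]

np_res = np.var(np.array(x), axis=0, ddof=1)
pt_res = torch.var(torch.tensor(x, dtype=torch.float64), dim=0, correction=1)

assert np.allclose(np_res, pt_res.numpy())  # both divide by N - 1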

tests/shared/test_templates/template_test_nncf_tensor.py

+197 -0
@@ -753,3 +753,200 @@ def test_fn_mean_per_channel_incorrect_axis(self, axis):
         tensor = Tensor(self.to_tensor([[[9.0, 9.0], [0.0, 3.0]], [[5.0, 1.0], [7.0, 1.0]]]))
         with pytest.raises(ValueError, match="is out of bounds for array of dimension"):
             s_fns.mean_per_channel(tensor, axis)
+
+    def test_size(self):
+        tensor = Tensor(self.to_tensor([1, 1]))
+        res = tensor.size
+        assert res == 2
+
+    def test_item(self):
+        tensor = Tensor(self.to_tensor([1]))
+        res = tensor.item()
+        assert res == 1
+
+    @pytest.mark.parametrize(
+        "val, min, max, ref",
+        (([0.9, 2.1], 1.0, 2.0, [1.0, 2.0]), ([0.9, 2.1], [0.0, 2.5], [0.5, 3.0], [0.5, 2.5])),
+    )
+    def test_fn_clip(self, val, min, max, ref):
+        tensor = Tensor(self.to_tensor(val))
+        if isinstance(min, list):
+            min = Tensor(self.to_tensor(min))
+        if isinstance(max, list):
+            max = Tensor(self.to_tensor(max))
+        ref_tensor = self.to_tensor(ref)
+
+        res = fns.clip(tensor, min, max)
+
+        assert isinstance(res, Tensor)
+        assert fns.allclose(res.data, ref_tensor)
+        assert res.device == tensor.device
+
+    def test_fn_as_tensor_like(self):
+        tensor = Tensor(self.to_tensor([1]))
+        data = [1.0, 2.0]
+        ref = self.to_tensor(data)
+
+        res = fns.as_tensor_like(tensor, data)
+
+        assert isinstance(res, Tensor)
+        assert fns.allclose(res.data, ref)
+        assert res.device == tensor.device
+
+    @pytest.mark.parametrize(
+        "x, axis, keepdims, ref",
+        (
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                0,
+                False,
+                [0.9, 0.9, 0.3],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                0,
+                True,
+                [[0.9, 0.9, 0.3]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                (0, 1),
+                True,
+                [[2.1]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                None,
+                False,
+                2.1,
+            ),
+        ),
+    )
+    def test_fn_sum(self, x, axis, keepdims, ref):
+        tensor = Tensor(self.to_tensor(x))
+        ref_tensor = self.to_tensor(ref)
+
+        res = fns.sum(tensor, axis, keepdims)
+
+        assert isinstance(res, Tensor)
+        assert fns.allclose(res.data, ref_tensor)
+        assert res.device == tensor.device
+
+    @pytest.mark.parametrize(
+        "a, b, ref",
+        (
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                [[0.1, 0.7, 0.1], [0.8, 0.2, 0.2]],
+                [[0.08, 0.14, 0.02], [0.08, 0.14, 0.02]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                0.1,
+                [[0.08, 0.02, 0.02], [0.01, 0.07, 0.01]],
+            ),
+        ),
+    )
+    def test_fn_multiply(self, a, b, ref):
+        tensor_a = Tensor(self.to_tensor(a))
+        tensor_b = Tensor(self.to_tensor(b))
+        ref_tensor = self.to_tensor(ref)
+
+        res = fns.multiply(tensor_a, tensor_b)
+
+        assert isinstance(res, Tensor)
+        assert fns.allclose(res.data, ref_tensor)
+        assert res.device == tensor_a.device
+
+    @pytest.mark.parametrize(
+        "x, axis, keepdims, ddof, ref",
+        (
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                0,
+                False,
+                0,
+                [0.1225, 0.0625, 0.0025],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                0,
+                True,
+                1,
+                [[0.245, 0.125, 0.005]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                (0, 1),
+                True,
+                0,
+                [[0.0825]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                None,
+                False,
+                1,
+                0.099,
+            ),
+        ),
+    )
+    def test_fn_var(self, x, axis, keepdims, ddof, ref):
+        tensor = Tensor(self.to_tensor(x))
+        ref_tensor = self.to_tensor(ref)
+
+        res = fns.var(tensor, axis, keepdims, ddof)
+
+        assert isinstance(res, Tensor)
+        assert fns.allclose(res.data, ref_tensor)
+        assert res.device == tensor.device
+
+    @pytest.mark.parametrize(
+        "x, ord, axis, keepdims, ref",
+        (
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                None,
+                0,
+                False,
+                [0.80622577, 0.72801099, 0.2236068],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                "fro",
+                None,
+                True,
+                [[1.10905365]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                "nuc",
+                (0, 1),
+                True,
+                [[1.53063197]],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                float("inf"),
+                0,
+                False,
+                [0.8, 0.7, 0.2],
+            ),
+            (
+                [[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]],
+                2,
+                None,
+                False,
+                0.9364634205074938,
+            ),
+        ),
+    )
+    def test_fn_linalg_norm(self, x, ord, axis, keepdims, ref):
+        tensor = Tensor(self.to_tensor(x))
+        ref_tensor = self.to_tensor(ref)
+
+        res = fns.linalg.norm(tensor, ord, axis, keepdims)
+
+        assert isinstance(res, Tensor)
+        assert fns.allclose(res.data, ref_tensor)
+        assert res.device == tensor.device
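The reference values in `test_fn_linalg_norm` can be cross-checked against `np.linalg.norm` directly, since the NumPy backend is expected to forward to it; a sketch of that check (illustrative only):

import numpy as np

x = np.array([[0.8, 0.2, 0.2], [0.1, 0.7, 0.1]])

assert np.allclose(np.linalg.norm(x, axis=0), [0.80622577, 0.72801099, 0.2236068])
assert np.isclose(np.linalg.norm(x, "fro"), 1.10905365)
assert np.isclose(np.linalg.norm(x, "nuc"), 1.53063197)
assert np.allclose(np.linalg.norm(x, np.inf, axis=0), [0.8, 0.7, 0.2])
assert np.isclose(np.linalg.norm(x, 2), 0.9364634205074938)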
