From 99c88f3a04986ba89e438315d33d0e37fad5089f Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Wed, 28 Oct 2020 12:25:43 +0000
Subject: [PATCH 1/6] check and fix tensor and scalar type promotion

---
 python/paddle/fluid/dygraph/math_op_patch.py  |  40 +-
 python/paddle/fluid/layers/math_op_patch.py   |  37 +-
 ...st_tensor_scalar_type_promotion_dynamic.py | 318 +++++++++++++++
 ...est_tensor_scalar_type_promotion_static.py | 369 ++++++++++++++++++
 4 files changed, 737 insertions(+), 27 deletions(-)
 create mode 100644 python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
 create mode 100644 python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py

diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 68206f62860852..e4dba3b9eae998 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -149,21 +149,31 @@ def _binary_creator_(method_name,
                      reverse=False,
                      scalar_method=None):
     def __impl__(self, other_var):
-        # FIXME(zjl): elementwise_div between integers cannot be converted to scale,
-        # which may lose accuracy. This is a hot fix for release 1.6.
-        if scalar_method is not None and not (
-                op_type == 'elementwise_div' and
-                self.dtype in _supported_int_dtype_):
-            if isinstance(other_var, float):
-                if self.dtype in _supported_int_dtype_:
-                    assert other_var == int(other_var), \
-                        "float value {} cannot convert to integer".format(other_var)
-                return scalar_method(self, other_var)
-            elif isinstance(other_var, int):
-                return scalar_method(self, float(other_var))
-
+        # 1. scalar operand cases
+        # we need to combine the tensor.dtype and scalar.dtype and cast the correct object
+        if isinstance(other_var, float):
+            # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
+            if self.dtype in _supported_int_dtype_:
+                self = astype(self, 'float32')
+        elif isinstance(other_var, int):
+            # in all cases (+, -, *, /, **, //, %), we can cast it to float
+            # because the output tensor.dtype depends on the type of the input tensor
+            other_var = float(other_var)
+            # division is a special case
+            if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
+                self = astype(self, 'float32')
+        else:
+            raise TypeError(
+                "Only scalar operations with `int` and `float` are supported now.")
+
+        # 2. scalar method selected
+        # here we use `scale` to replace `elementwise` ops to get better performance
+        # but only +, -, *, / can use this method
+        if scalar_method is not None:
+            return scalar_method(self, other_var)
+
+        # 3. create varbase for scalar
         lhs_dtype = self.dtype
-
         if not isinstance(other_var, core.VarBase):
             if reverse:
                 other_var = create_tensor(
@@ -172,6 +182,7 @@ def __impl__(self, other_var):
                 # add fill_op
                 other_var = create_scalar(value=other_var, dtype=lhs_dtype)
 
+        # 4. unify right var type to left var
         rhs_dtype = other_var.dtype
         if lhs_dtype != rhs_dtype:
            other_var = astype(other_var, lhs_dtype)
@@ -180,6 +191,7 @@ def __impl__(self, other_var):
             self = other_var
             other_var = tmp
 
+        # 5. calculation
         axis = -1
         math_op = getattr(core.ops, op_type)
         return math_op(self, other_var, 'axis', axis)
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 92b58a7e2ee4c7..7fde3632753460 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -215,21 +215,31 @@ def _binary_creator_(method_name,
                      reverse=False,
                      scalar_method=None):
     def __impl__(self, other_var):
-        # FIXME(zjl): elementwise_div between integers cannot be converted to scale,
-        # which may lose accuracy. This is a hot fix for release 1.6.
-        if scalar_method is not None and not (
-                op_type == 'elementwise_div' and
-                self.dtype in _supported_int_dtype_):
-            if isinstance(other_var, float):
-                if self.dtype in _supported_int_dtype_:
-                    assert other_var == int(other_var), \
-                        "float value {} cannot convert to integer".format(other_var)
-                return scalar_method(self, other_var)
-            elif isinstance(other_var, int):
-                return scalar_method(self, float(other_var))
+        # 1. scalar operand cases
+        # we need to combine the tensor.dtype and scalar.dtype and cast the correct object
+        if isinstance(other_var, float):
+            # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
+            if self.dtype in _supported_int_dtype_:
+                self = astype(self, 'float32')
+        elif isinstance(other_var, int):
+            # in all cases (+, -, *, /, **, //, %), we can cast it to float
+            # because the output tensor.dtype depends on the type of the input tensor
+            other_var = float(other_var)
+            # division is a special case
+            if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
+                self = astype(self, 'float32')
+        else:
+            raise TypeError(
+                "Only scalar operations with `int` and `float` are supported now.")
 
-        lhs_dtype = safe_get_dtype(self)
+        # 2. scalar method selected
+        # here we use `scale` to replace `elementwise` ops to get better performance
+        # but only +, -, *, / can use this method
+        if scalar_method is not None:
+            return scalar_method(self, other_var)
 
+        # 3. create variable for scalar
+        lhs_dtype = safe_get_dtype(self)
         if not isinstance(other_var, Variable):
             if reverse:
                 has_batch_size = False
@@ -251,6 +261,7 @@ def __impl__(self, other_var):
                 other_var = create_scalar(
                     current_block(self), value=other_var, dtype=lhs_dtype)
 
+        # 4. unify right var type to left var
         rhs_dtype = safe_get_dtype(other_var)
         if lhs_dtype != rhs_dtype:
             other_var = astype(other_var, lhs_dtype)
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
new file mode 100644
index 00000000000000..5f2dfbdd99e161
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_dynamic.py
@@ -0,0 +1,318 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from __future__ import print_function, division + +import unittest +import numpy as np + +import paddle + +# Support types are ref from `paddle.tensor.math` +# - Related paddle dtypes: +# - int type: int64, (no test here: uint8, int8, int16, int32) +# - float type: float32, (no test here: float64) +# - Python scalar dtypes: +# - int(64) +# - float(64) + + +class TestTensorScalarTypePromotionDynamic(unittest.TestCase): + def check_operation(self, a, b, c, op): + if op == '+': + c_rlt = a + b + elif op == '-': + c_rlt = a - b + elif op == '*': + c_rlt = a * b + elif op == '/': + c_rlt = a / b + elif op == '**': + c_rlt = a**b + elif op == '//': + c_rlt = a // b + elif op == '%': + c_rlt = a % b + else: + raise ValueError("Unsupported operation.") + + self.assertEqual(c_rlt.dtype, c.dtype) + self.assertTrue(np.array_equal(c_rlt.numpy(), c.numpy())) + + def test_tensor_add_scalar(self): + # tensor(int64) + scalar(int) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1 + c = paddle.full([2, 2, 2], 2, dtype="int64") + self.check_operation(a, b, c, '+') + + # tensor(float32) + scalar(int) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '+') + + # tensor(int64) + scalar(float, .0) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.0 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '+') + + # tensor(int64) + scalar(float, .5) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.5 + c = paddle.full([2, 2, 2], 2.5, dtype="float32") + self.check_operation(a, b, c, '+') + + # tensor(float32) + scalar(float) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1.5 + c = paddle.full([2, 2, 2], 2.5, dtype="float32") + self.check_operation(a, b, c, '+') + + def test_tensor_sub_scalar(self): + # tensor(int64) - scalar(int) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1 + c = paddle.zeros([2, 2, 2], dtype="int64") + self.check_operation(a, b, c, '-') + + # tensor(float32) - scalar(int) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1 + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # tensor(int64) - scalar(float, .0) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.0 + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # tensor(int64) - scalar(float, .5) + a = paddle.full([2, 2, 2], 2, dtype='int64') + b = 1.5 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + # tensor(float32) - scalar(float) + a = paddle.full([2, 2, 2], 2, dtype='float32') + b = 1.5 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + def test_scalar_sub_tensor(self): + # scalar(int) - tensor(int64) + a = 1 + b = paddle.ones([2, 2, 2], dtype='int64') + c = paddle.zeros([2, 2, 2], dtype="int64") + self.check_operation(a, b, c, '-') + + # scalar(int) - tensor(float32) + a = 1 + b = paddle.ones([2, 2, 2], dtype='float32') + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # scalar(float, .0) - tensor(int64) + a = 1.0 + b = paddle.ones([2, 2, 2], dtype='int64') + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # scalar(float, .5) - tensor(int64) + a = 1.5 + b = paddle.full([2, 2, 2], 2, dtype='int64') + c = paddle.full([2, 2, 2], -0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + # scalar(float) - tensor(float32) + a = 1.5 + b = paddle.full([2, 2, 2], 2, dtype='float32') + 
c = paddle.full([2, 2, 2], -0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + def test_tensor_mul_tensor(self): + # tensor(int64) * scalar(int) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1 + c = paddle.ones([2, 2, 2], dtype="int64") + self.check_operation(a, b, c, '*') + + # tensor(float32) * scalar(int) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1 + c = paddle.ones([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '*') + + # tensor(int64) * scalar(float, .0) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.0 + c = paddle.ones([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '*') + + # tensor(int64) * scalar(float, .5) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.5 + c = paddle.full([2, 2, 2], 1.5, dtype="float32") + self.check_operation(a, b, c, '*') + + # tensor(float32) * scalar(float) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1.5 + c = paddle.full([2, 2, 2], 1.5, dtype="float32") + self.check_operation(a, b, c, '*') + + def test_tensor_div_scalar(self): + # tensor(int64) / scalar(int) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 2 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(float32) / scalar(int) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 2 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(int64) / scalar(float, .0) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 2.0 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(int64) / scalar(float, .5) + a = paddle.ones([2, 2, 2], dtype='int64') + b = 0.5 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(float32) / scalar(float) + a = paddle.ones([2, 2, 2], dtype='float32') + b = 0.5 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + def test_scalar_div_tensor(self): + # scalar(int) / tensor(int64) + a = 1 + b = paddle.full([2, 2, 2], 2, dtype='int64') + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # scalar(int) / tensor(float32) + a = 1 + b = paddle.full([2, 2, 2], 0.5, dtype='float32') + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + # scalar(float) / tensor(int64) + a = 1.0 + b = paddle.full([2, 2, 2], 2, dtype='int64') + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # scalar(float) / tensor(float32) + a = 1.0 + b = paddle.full([2, 2, 2], 0.5, dtype='float32') + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + def test_tensor_pow_scalar(self): + # tensor(int64) ** scalar(int) + a = paddle.full([2, 2, 2], 2, dtype='int64') + b = 3 + c = paddle.full([2, 2, 2], 8, dtype="int64") + self.check_operation(a, b, c, '**') + + # tensor(int64) ** scalar(float) + a = paddle.full([2, 2, 2], 2, dtype='int64') + b = 3.0 + c = paddle.full([2, 2, 2], 8, dtype="float32") + self.check_operation(a, b, c, '**') + + # tensor(float32) ** scalar(int) + a = paddle.full([2, 2, 2], 2, dtype='float32') + b = 3 + c = paddle.full([2, 2, 2], 8, dtype="float32") + self.check_operation(a, b, c, '**') + + # tensor(float32) ** scalar(float) + a = paddle.full([2, 2, 2], 2, dtype='float32') + b = 3.0 + c = paddle.full([2, 2, 2], 8, dtype="float32") + self.check_operation(a, b, c, '**') + + def test_scalar_pow_tensor(self): + # scalar(int) ** tensor(int64) + a = 3 + b = paddle.full([2, 
2, 2], 2, dtype='int64')
+        c = paddle.full([2, 2, 2], 9, dtype="int64")
+        self.check_operation(a, b, c, '**')
+
+        # scalar(float) ** tensor(int64)
+        a = 3.0
+        b = paddle.full([2, 2, 2], 2, dtype='int64')
+        c = paddle.full([2, 2, 2], 9, dtype="float32")
+        self.check_operation(a, b, c, '**')
+
+        # scalar(int) ** tensor(float32)
+        a = 3
+        b = paddle.full([2, 2, 2], 2, dtype='float32')
+        c = paddle.full([2, 2, 2], 9, dtype="float32")
+        self.check_operation(a, b, c, '**')
+
+        # scalar(float) ** tensor(float32)
+        a = 3.0
+        b = paddle.full([2, 2, 2], 2, dtype='float32')
+        c = paddle.full([2, 2, 2], 9, dtype="float32")
+        self.check_operation(a, b, c, '**')
+
+    ## TODO: floordiv op kernel doesn't support float
+    def test_tensor_floordiv_scalar(self):
+        # tensor(int64) // scalar(int)
+        a = paddle.full([2, 2, 2], 3, dtype='int64')
+        b = 2
+        c = paddle.full([2, 2, 2], 1, dtype="int64")
+        self.check_operation(a, b, c, '//')
+
+    def test_tensor_mod_scalar(self):
+        # tensor(int64) % scalar(int)
+        a = paddle.full([2, 2, 2], 3, dtype='int64')
+        b = 2
+        c = paddle.full([2, 2, 2], 1, dtype="int64")
+        self.check_operation(a, b, c, '%')
+
+        # tensor(int64) % scalar(float)
+        a = paddle.full([2, 2, 2], 3, dtype='int64')
+        b = 2.0
+        c = paddle.full([2, 2, 2], 1, dtype="float32")
+        self.check_operation(a, b, c, '%')
+
+        # tensor(float32) % scalar(int)
+        a = paddle.full([2, 2, 2], 3, dtype='float32')
+        b = 2
+        c = paddle.full([2, 2, 2], 1, dtype="float32")
+        self.check_operation(a, b, c, '%')
+
+        # tensor(float32) % scalar(float)
+        a = paddle.full([2, 2, 2], 3, dtype='float32')
+        b = 2.0
+        c = paddle.full([2, 2, 2], 1, dtype="float32")
+        self.check_operation(a, b, c, '%')
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
new file mode 100644
index 00000000000000..d697666e12ddd1
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_tensor_scalar_type_promotion_static.py
@@ -0,0 +1,369 @@
+# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +from __future__ import print_function, division + +import unittest +import numpy as np + +import paddle +from paddle.static import program_guard +from paddle.static import Program + +# Support types are ref from `paddle.tensor.math` +# - Related paddle dtypes: +# - int type: int64, (no test here: uint8, int8, int16, int32) +# - float type: float32, (no test here: float64) +# - Python scalar dtypes: +# - int(64) +# - float(64) + + +class TestTensorScalarTypePromotionStatic(unittest.TestCase): + def setUp(self): + paddle.enable_static() + + def check_operation(self, a, b, c, op): + exe = paddle.static.Executor() + + if op == '+': + c_rlt = a + b + elif op == '-': + c_rlt = a - b + elif op == '*': + c_rlt = a * b + elif op == '/': + c_rlt = a / b + elif op == '**': + c_rlt = a**b + elif op == '//': + c_rlt = a // b + elif op == '%': + c_rlt = a % b + else: + raise ValueError("Unsupported operation.") + + rlt = exe.run(fetch_list=[c_rlt.name, c.name]) + + self.assertEqual(rlt[0].dtype, rlt[1].dtype) + self.assertTrue(np.array_equal(rlt[0], rlt[1])) + + def test_tensor_add_scalar(self): + # tensor(int64) + scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1 + c = paddle.full([2, 2, 2], 2, dtype="int64") + self.check_operation(a, b, c, '+') + + # tensor(float32) + scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '+') + + # tensor(int64) + scalar(float, .0) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.0 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '+') + + # tensor(int64) + scalar(float, .5) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.5 + c = paddle.full([2, 2, 2], 2.5, dtype="float32") + self.check_operation(a, b, c, '+') + + # tensor(float32) + scalar(float) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1.5 + c = paddle.full([2, 2, 2], 2.5, dtype="float32") + self.check_operation(a, b, c, '+') + + def test_tensor_sub_scalar(self): + # tensor(int64) - scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1 + c = paddle.zeros([2, 2, 2], dtype="int64") + self.check_operation(a, b, c, '-') + + # tensor(float32) - scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1 + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # tensor(int64) - scalar(float, .0) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.0 + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # tensor(int64) - scalar(float, .5) + with program_guard(Program()): + a = paddle.full([2, 2, 2], 2, dtype='int64') + b = 1.5 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + # tensor(float32) - scalar(float) + with program_guard(Program()): + a = paddle.full([2, 2, 2], 2, dtype='float32') + b = 1.5 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + def test_scalar_sub_tensor(self): + # scalar(int) - tensor(int64) + with program_guard(Program()): + a = 1 + b = paddle.ones([2, 2, 2], dtype='int64') + c = paddle.zeros([2, 2, 2], dtype="int64") + self.check_operation(a, b, c, '-') + + # scalar(int) - tensor(float32) + with program_guard(Program()): + 
a = 1 + b = paddle.ones([2, 2, 2], dtype='float32') + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # scalar(float, .0) - tensor(int64) + with program_guard(Program()): + a = 1.0 + b = paddle.ones([2, 2, 2], dtype='int64') + c = paddle.zeros([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '-') + + # scalar(float, .5) - tensor(int64) + with program_guard(Program()): + a = 1.5 + b = paddle.full([2, 2, 2], 2, dtype='int64') + c = paddle.full([2, 2, 2], -0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + # scalar(float) - tensor(float32) + with program_guard(Program()): + a = 1.5 + b = paddle.full([2, 2, 2], 2, dtype='float32') + c = paddle.full([2, 2, 2], -0.5, dtype="float32") + self.check_operation(a, b, c, '-') + + def test_tensor_mul_tensor(self): + # tensor(int64) * scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1 + c = paddle.ones([2, 2, 2], dtype="int64") + self.check_operation(a, b, c, '*') + + # tensor(float32) * scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1 + c = paddle.ones([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '*') + + # tensor(int64) * scalar(float, .0) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.0 + c = paddle.ones([2, 2, 2], dtype="float32") + self.check_operation(a, b, c, '*') + + # tensor(int64) * scalar(float, .5) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 1.5 + c = paddle.full([2, 2, 2], 1.5, dtype="float32") + self.check_operation(a, b, c, '*') + + # tensor(float32) * scalar(float) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 1.5 + c = paddle.full([2, 2, 2], 1.5, dtype="float32") + self.check_operation(a, b, c, '*') + + def test_tensor_div_scalar(self): + # tensor(int64) / scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 2 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(float32) / scalar(int) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 2 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(int64) / scalar(float, .0) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 2.0 + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(int64) / scalar(float, .5) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='int64') + b = 0.5 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + # tensor(float32) / scalar(float) + with program_guard(Program()): + a = paddle.ones([2, 2, 2], dtype='float32') + b = 0.5 + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + def test_scalar_div_tensor(self): + # scalar(int) / tensor(int64) + with program_guard(Program()): + a = 1 + b = paddle.full([2, 2, 2], 2, dtype='int64') + c = paddle.full([2, 2, 2], 0.5, dtype="float32") + self.check_operation(a, b, c, '/') + + # scalar(int) / tensor(float32) + with program_guard(Program()): + a = 1 + b = paddle.full([2, 2, 2], 0.5, dtype='float32') + c = paddle.full([2, 2, 2], 2, dtype="float32") + self.check_operation(a, b, c, '/') + + # scalar(float) / tensor(int64) + with program_guard(Program()): + a = 1.0 + b = paddle.full([2, 2, 2], 2, dtype='int64') + c = 
paddle.full([2, 2, 2], 0.5, dtype="float32")
+            self.check_operation(a, b, c, '/')
+
+        # scalar(float) / tensor(float32)
+        with program_guard(Program()):
+            a = 1.0
+            b = paddle.full([2, 2, 2], 0.5, dtype='float32')
+            c = paddle.full([2, 2, 2], 2, dtype="float32")
+            self.check_operation(a, b, c, '/')
+
+    def test_tensor_pow_scalar(self):
+        # tensor(int64) ** scalar(int)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 2, dtype='int64')
+            b = 3
+            c = paddle.full([2, 2, 2], 8, dtype="int64")
+            self.check_operation(a, b, c, '**')
+
+        # tensor(int64) ** scalar(float)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 2, dtype='int64')
+            b = 3.0
+            c = paddle.full([2, 2, 2], 8, dtype="float32")
+            self.check_operation(a, b, c, '**')
+
+        # tensor(float32) ** scalar(int)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 2, dtype='float32')
+            b = 3
+            c = paddle.full([2, 2, 2], 8, dtype="float32")
+            self.check_operation(a, b, c, '**')
+
+        # tensor(float32) ** scalar(float)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 2, dtype='float32')
+            b = 3.0
+            c = paddle.full([2, 2, 2], 8, dtype="float32")
+            self.check_operation(a, b, c, '**')
+
+    def test_scalar_pow_tensor(self):
+        # scalar(int) ** tensor(int64)
+        with program_guard(Program()):
+            a = 3
+            b = paddle.full([2, 2, 2], 2, dtype='int64')
+            c = paddle.full([2, 2, 2], 9, dtype="int64")
+            self.check_operation(a, b, c, '**')
+
+        # scalar(float) ** tensor(int64)
+        with program_guard(Program()):
+            a = 3.0
+            b = paddle.full([2, 2, 2], 2, dtype='int64')
+            c = paddle.full([2, 2, 2], 9, dtype="float32")
+            self.check_operation(a, b, c, '**')
+
+        # scalar(int) ** tensor(float32)
+        with program_guard(Program()):
+            a = 3
+            b = paddle.full([2, 2, 2], 2, dtype='float32')
+            c = paddle.full([2, 2, 2], 9, dtype="float32")
+            self.check_operation(a, b, c, '**')
+
+        # scalar(float) ** tensor(float32)
+        with program_guard(Program()):
+            a = 3.0
+            b = paddle.full([2, 2, 2], 2, dtype='float32')
+            c = paddle.full([2, 2, 2], 9, dtype="float32")
+            self.check_operation(a, b, c, '**')
+
+    ## TODO: floordiv op kernel doesn't support float
+    def test_tensor_floordiv_scalar(self):
+        # tensor(int64) // scalar(int)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 3, dtype='int64')
+            b = 2
+            c = paddle.full([2, 2, 2], 1, dtype="int64")
+            self.check_operation(a, b, c, '//')
+
+    def test_tensor_mod_scalar(self):
+        # tensor(int64) % scalar(int)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 3, dtype='int64')
+            b = 2
+            c = paddle.full([2, 2, 2], 1, dtype="int64")
+            self.check_operation(a, b, c, '%')
+
+        # tensor(int64) % scalar(float)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 3, dtype='int64')
+            b = 2.0
+            c = paddle.full([2, 2, 2], 1, dtype="float32")
+            self.check_operation(a, b, c, '%')
+
+        # tensor(float32) % scalar(int)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 3, dtype='float32')
+            b = 2
+            c = paddle.full([2, 2, 2], 1, dtype="float32")
+            self.check_operation(a, b, c, '%')
+
+        # tensor(float32) % scalar(float)
+        with program_guard(Program()):
+            a = paddle.full([2, 2, 2], 3, dtype='float32')
+            b = 2.0
+            c = paddle.full([2, 2, 2], 1, dtype="float32")
+            self.check_operation(a, b, c, '%')
+
+
+if __name__ == '__main__':
+    unittest.main()

From f1538926dca479fe1962f4baf27469a5cf34b848 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Wed, 28 Oct 2020 14:30:22 +0000
Subject: [PATCH 2/6] fix else branch error

---
 python/paddle/fluid/dygraph/math_op_patch.py  |   4 ++--
 python/paddle/fluid/layers/math_op_patch.py   |   4 ++--
 .../static_mode_white_list.cpython-35.pyc     | Bin 0 -> 19105 bytes
 3 files changed, 4 insertions(+), 4 deletions(-)
 create mode 100644 tools/__pycache__/static_mode_white_list.cpython-35.pyc

diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index e4dba3b9eae998..d5d8e763c6978e 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -163,8 +163,8 @@ def __impl__(self, other_var):
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
         else:
-            raise TypeError(
-                "Only scalar operations with `int` and `float` are supported now.")
+            # do nothing
+            pass
 
         # 2. scalar method selected
         # here we use `scale` to replace `elementwise` ops to get better performance
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 7fde3632753460..a0493530017809 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -229,8 +229,8 @@ def __impl__(self, other_var):
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
         else:
-            raise TypeError(
-                "Only scalar operations with `int` and `float` are supported now.")
+            # do nothing
+            pass
 
         # 2. scalar method selected
         # here we use `scale` to replace `elementwise` ops to get better performance
diff --git a/tools/__pycache__/static_mode_white_list.cpython-35.pyc b/tools/__pycache__/static_mode_white_list.cpython-35.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5503b0329783028fd00f7a70097cf4b944749f3f
GIT binary patch
[literal 19105: base85-encoded binary data omitted]

From: Chen Weihang
Date: Thu, 29 Oct 2020 04:45:37 +0000
Subject: [PATCH 3/6] fix scalar method error

---
 python/paddle/fluid/dygraph/math_op_patch.py | 20 +++++++++++---------
 python/paddle/fluid/layers/math_op_patch.py  | 18 ++++++++++--------
 2 files changed, 21 insertions(+), 17 deletions(-)

diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 8ee5e66025c9e4..9c2036ccf72dd3 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -162,6 +162,10 @@ def __impl__(self, other_var):
             # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
             if self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
+            # here we use `scale` to replace `elementwise` ops to get better performance
+            # but only +, -, *, / can use this method
+            if scalar_method is not None:
+                return scalar_method(self, other_var)
         elif isinstance(other_var, int):
             # in all cases (+, -, *, /, **, //, %), we can cast it to float
             # because the output tensor.dtype depends on the type of the input tensor
             other_var = float(other_var)
             # division is a special case
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
+            # here we use `scale` to replace `elementwise` ops to get better performance
+            # but only +, -, *, / can use this method
+            if scalar_method is not None:
+                return scalar_method(self, other_var)
         else:
             # do nothing
             pass
 
-        # 2. scalar method selected
-        # here we use `scale` to replace `elementwise` ops to get better performance
-        # but only +, -, *, / can use this method
-        if scalar_method is not None:
-            return scalar_method(self, other_var)
-
-        # 3. create varbase for scalar
+        # 2. create varbase for scalar
         lhs_dtype = self.dtype
         if not isinstance(other_var, core.VarBase):
             if reverse:
@@ -189,7 +191,7 @@ def __impl__(self, other_var):
                 # add fill_op
                 other_var = create_scalar(value=other_var, dtype=lhs_dtype)
 
-        # 4. unify right var type to left var
+        # 3. unify right var type to left var
         rhs_dtype = other_var.dtype
         if lhs_dtype != rhs_dtype:
             other_var = astype(other_var, lhs_dtype)
@@ -198,7 +200,7 @@ def __impl__(self, other_var):
             self = other_var
             other_var = tmp
 
-        # 5. calculation
+        # 4. calculation
         axis = -1
         math_op = getattr(core.ops, op_type)
         return math_op(self, other_var, 'axis', axis)
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index a0493530017809..845aa6365c312a 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -221,6 +221,10 @@ def __impl__(self, other_var):
             # in all cases (+, -, *, /, **, //, %), we need to cast tensor.dtype to float
             if self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
+            # here we use `scale` to replace `elementwise` ops to get better performance
+            # but only +, -, *, / can use this method
+            if scalar_method is not None:
+                return scalar_method(self, other_var)
         elif isinstance(other_var, int):
             # in all cases (+, -, *, /, **, //, %), we can cast it to float
             # because the output tensor.dtype depends on the type of the input tensor
             other_var = float(other_var)
             # division is a special case
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
+            # here we use `scale` to replace `elementwise` ops to get better performance
+            # but only +, -, *, / can use this method
+            if scalar_method is not None:
+                return scalar_method(self, other_var)
         else:
             # do nothing
             pass
 
-        # 2. scalar method selected
-        # here we use `scale` to replace `elementwise` ops to get better performance
-        # but only +, -, *, / can use this method
-        if scalar_method is not None:
-            return scalar_method(self, other_var)
-
-        # 3. create variable for scalar
+        # 2. create variable for scalar
         lhs_dtype = safe_get_dtype(self)
         if not isinstance(other_var, Variable):
             if reverse:
@@ -261,7 +263,7 @@ def __impl__(self, other_var):
                 other_var = create_scalar(
                     current_block(self), value=other_var, dtype=lhs_dtype)
 
-        # 4. unify right var type to left var
+        # 3. unify right var type to left var
         rhs_dtype = safe_get_dtype(other_var)
         if lhs_dtype != rhs_dtype:
             other_var = astype(other_var, lhs_dtype)

From e76b4741a0402ff7777ded64475d844d1b4e5240 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Thu, 29 Oct 2020 07:26:49 +0000
Subject: [PATCH 4/6] fix test_math_op_patch unittest

---
 python/paddle/fluid/dygraph/math_op_patch.py              | 6 ++++++
 python/paddle/fluid/layers/math_op_patch.py               | 6 ++++++
 python/paddle/fluid/tests/unittests/test_math_op_patch.py | 8 ++++++--
 3 files changed, 18 insertions(+), 2 deletions(-)

diff --git a/python/paddle/fluid/dygraph/math_op_patch.py b/python/paddle/fluid/dygraph/math_op_patch.py
index 9c2036ccf72dd3..203a5e0f86ac5b 100644
--- a/python/paddle/fluid/dygraph/math_op_patch.py
+++ b/python/paddle/fluid/dygraph/math_op_patch.py
@@ -171,6 +171,12 @@ def __impl__(self, other_var):
             # because the output tensor.dtype depends on the type of the input tensor
             other_var = float(other_var)
             # division is a special case
+            # NOTE(chenweihang): because we cast the tensor to float32 instead of float64,
+            # the division result can only guarantee about 6 digits of accuracy
+            # after the decimal point. NumPy calculates in float64, so the result
+            # here and the NumPy result may differ after the 6th decimal digit.
+            # If necessary, we can also use float64 here.
+            # torch's behavior here is consistent with ours
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
             # here we use `scale` to replace `elementwise` ops to get better performance
diff --git a/python/paddle/fluid/layers/math_op_patch.py b/python/paddle/fluid/layers/math_op_patch.py
index 845aa6365c312a..8f5fdf52d95ef9 100644
--- a/python/paddle/fluid/layers/math_op_patch.py
+++ b/python/paddle/fluid/layers/math_op_patch.py
@@ -230,6 +230,12 @@ def __impl__(self, other_var):
             # because the output tensor.dtype depends on the type of the input tensor
             other_var = float(other_var)
             # division is a special case
+            # NOTE(chenweihang): because we cast the tensor to float32 instead of float64,
+            # the division result can only guarantee about 6 digits of accuracy
+            # after the decimal point. NumPy calculates in float64, so the result
+            # here and the NumPy result may differ after the 6th decimal digit.
+            # If necessary, we can also use float64 here.
+            # torch's behavior here is consistent with ours
             if op_type == 'elementwise_div' and self.dtype in _supported_int_dtype_:
                 self = astype(self, 'float32')
             # here we use `scale` to replace `elementwise` ops to get better performance
diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py
index f6eff22d6ce5f0..eec81b7f8fe4df 100644
--- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py
+++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py
@@ -16,11 +16,15 @@
 import unittest
 from decorator_helper import prog_scope
 
+import paddle
 import paddle.fluid as fluid
 import numpy
 
 
 class TestMathOpPatches(unittest.TestCase):
+    def setUp(self):
+        paddle.enable_static()
+
     @prog_scope()
     def test_add_scalar(self):
         a = fluid.layers.data(name="a", shape=[1])
@@ -197,8 +201,8 @@ def test_integer_div(self):
                       feed={"a": a_np},
                       fetch_list=[b])
 
-        b_np_actual = (a_np / 7).astype('int64')
-        self.assertTrue(numpy.array_equal(b_np, b_np_actual))
+        b_np_actual = (a_np / 7).astype('float32')
+        self.assertTrue(numpy.allclose(b_np, b_np_actual))
 
     @prog_scope()
     def test_equal(self):

From 818880fc2bf2afbbfd28fea4060a3f32e60c0df2 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Thu, 29 Oct 2020 08:27:31 +0000
Subject: [PATCH 5/6] add future division for unittest

---
 python/paddle/fluid/tests/unittests/test_math_op_patch.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/paddle/fluid/tests/unittests/test_math_op_patch.py b/python/paddle/fluid/tests/unittests/test_math_op_patch.py
index eec81b7f8fe4df..76e371b216778f 100644
--- a/python/paddle/fluid/tests/unittests/test_math_op_patch.py
+++ b/python/paddle/fluid/tests/unittests/test_math_op_patch.py
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-from __future__ import print_function
+from __future__ import print_function, division
 
 import unittest
 from decorator_helper import prog_scope

From 964799c6706ba8016260242db30050ec31d5a776 Mon Sep 17 00:00:00 2001
From: Chen Weihang
Date: Fri, 30 Oct 2020 07:33:44 +0000
Subject: [PATCH 6/6] rm useless bin file

---
 .../static_mode_white_list.cpython-35.pyc     | Bin 19105 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 tools/__pycache__/static_mode_white_list.cpython-35.pyc

diff --git a/tools/__pycache__/static_mode_white_list.cpython-35.pyc b/tools/__pycache__/static_mode_white_list.cpython-35.pyc
deleted file mode 100644
index 5503b0329783028fd00f7a70097cf4b944749f3f..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

[literal 19105: base85-encoded binary data omitted]
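
The net effect of this series, as pinned down by the new unit tests: a Python float scalar (and int/int true division) promotes an integer tensor to float32, while an int scalar preserves the tensor's dtype. A minimal dygraph sketch for illustration only (not part of the patch; it assumes a Paddle build with these changes applied, and the expected dtypes follow the new tests):

    import paddle

    x = paddle.ones([2, 2], dtype='int64')

    # int scalar: the tensor dtype is preserved
    print((x + 1).dtype)    # int64

    # float scalar: the integer tensor is promoted to float32
    print((x + 1.5).dtype)  # float32

    # true division is special-cased: int / int also yields float32
    print((x / 2).dtype)    # float32

    # +, -, *, / with a scalar are lowered to the faster `scale` op;
    # **, // and % still go through the elementwise kernels
    print((x ** 2).dtype)   # int64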