
Commit 8f8d1fd

replace cross_entropy in test*.py except python/paddle/fluid/tests/unittests/*.py (PaddlePaddle#48978)
kangguangli authored Dec 13, 2022
1 parent 60ef229 commit 8f8d1fd
Showing 15 changed files with 40 additions and 25 deletions.
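
The change applied throughout the hunks below is mechanical: the legacy fluid.layers.relu op is replaced by the functional API paddle.nn.functional.relu, imported as F in each test file. For orientation, a minimal static-graph sketch of the before/after pattern is shown here; the program, tensor names, and shapes are illustrative assumptions and are not taken from any of the changed tests.

import numpy as np

import paddle
import paddle.nn.functional as F

paddle.enable_static()

main_prog = paddle.static.Program()
startup_prog = paddle.static.Program()
with paddle.static.program_guard(main_prog, startup_prog):
    x = paddle.static.data(name="x", shape=[-1, 4], dtype="float32")
    # Before this commit the tests called paddle.fluid.layers.relu(x) here.
    # The functional API is the replacement and works in both static and dynamic graphs.
    y = F.relu(x)
    loss = paddle.mean(y)

exe = paddle.static.Executor(paddle.CPUPlace())
exe.run(startup_prog)
(loss_val,) = exe.run(
    main_prog,
    feed={"x": np.random.rand(2, 4).astype("float32")},
    fetch_list=[loss],
)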
@@ -28,6 +28,7 @@
from paddle.fluid.contrib.slim.quantization import AddQuantDequantPass
from paddle.fluid import core
from paddle.fluid.layer_helper import LayerHelper
+import paddle.nn.functional as F

paddle.enable_static()

@@ -75,8 +76,8 @@ def pact(x, name=None):
learning_rate=1,
)
u_param = helper.create_parameter(attr=u_param_attr, shape=[1], dtype=dtype)
-x = paddle.subtract(x, fluid.layers.relu(paddle.subtract(x, u_param)))
-x = paddle.add(x, fluid.layers.relu(paddle.subtract(-u_param, x)))
+x = paddle.subtract(x, F.relu(paddle.subtract(x, u_param)))
+x = paddle.add(x, F.relu(paddle.subtract(-u_param, x)))

return x

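The first changed file updates the pact preprocessing function used by this quantization test; the two rewritten lines clip the activation into the range [-u_param, u_param]. A short dynamic-graph sketch of that clipping identity follows, with a made-up input tensor and clipping bound (the real u_param is a learnable parameter created by the LayerHelper above).

import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([-3.0, -0.5, 0.0, 2.0, 5.0])
u = paddle.to_tensor(1.5)  # stand-in for the learnable clipping bound u_param

# x - relu(x - u) caps values above u at u.
x = paddle.subtract(x, F.relu(paddle.subtract(x, u)))
# Adding relu(-u - x) lifts values below -u up to -u.
x = paddle.add(x, F.relu(paddle.subtract(-u, x)))

print(x.numpy())  # [-1.5 -0.5  0.   1.5  1.5] -- the input clipped to [-1.5, 1.5]
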
@@ -25,6 +25,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.jit.dy2static.utils import ast_to_func
from paddle.utils import gast

@@ -60,7 +61,7 @@ def test_ast2func_dygraph(self):

def test_ast2func_static(self):
def func(x):
-y = fluid.layers.relu(x)
+y = F.relu(x)
loss = paddle.mean(y)
return loss

@@ -43,6 +43,7 @@

import paddle
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.jit.api import declarative
from paddle.jit.dy2static.program_translator import ProgramTranslator
from paddle.jit.dy2static.utils import Dygraph2StaticException
@@ -269,7 +270,7 @@ def test_ast_to_func(self):

# Test to call function ahead caller.
def relu(x):
-return fluid.layers.relu(x)
+return F.relu(x)


def call_external_func(x, label=None):
@@ -18,6 +18,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.jit.api import declarative


@@ -48,7 +49,7 @@ def call_lambda_in_func(x):

add_func = lambda x: x + 1

-y = paddle.mean((lambda x: fluid.layers.relu(x))(x))
+y = paddle.mean((lambda x: F.relu(x))(x))
out = add_func(y) if y > 1 and y < 2 else (lambda x: x**2)(y)

return out
@@ -19,6 +19,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.jit.api import declarative
from paddle.jit.dy2static.loop_transformer import NameVisitor
from paddle.utils import gast
@@ -51,7 +52,7 @@ def while_loop_dyfun_with_conflict_var(x):

def relu(y):
# 'y' is not visible outside the scope.
-return fluid.layers.relu(y)
+return F.relu(y)

while x < 10:
# If a tmp variable is created which has same name
@@ -21,6 +21,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.fluid.dygraph import Layer, to_variable
from paddle.jit import ProgramTranslator
from paddle.jit.api import declarative
@@ -45,7 +46,7 @@ def forward(self, x):
x = paddle.reshape(x, shape=[1, 4])
x = self.affine1(x)
x = fluid.layers.dropout(x, self.dropout_ratio)
-x = fluid.layers.relu(x)
+x = F.relu(x)
action_scores = self.affine2(x)

log_prob = paddle.nn.functional.softmax(action_scores, axis=1)
@@ -17,6 +17,7 @@
import numpy as np

import paddle
+import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest

@@ -63,7 +64,7 @@ def build_model(self):
conv3 = paddle.static.nn.conv2d(
add1, num_filters=8, filter_size=8, bias_attr=False
)
-out = paddle.fluid.layers.relu(conv3, **self.attrs)
+out = F.relu(conv3, **self.attrs)
self.fetch_list = [out.name]

def run_model(self, exec_mode):
@@ -17,6 +17,7 @@
import numpy as np

import paddle
+import paddle.nn.functional as F
import paddle.static
from paddle.fluid.tests.unittests.ipu.op_test_ipu import IPUOpTest

@@ -147,7 +148,7 @@ def set_test_op(self):

class TestRelu(TestBase):
def set_test_op(self):
-self.op = paddle.fluid.layers.relu
+self.op = F.relu
self.op_attrs = {}

@@ -19,6 +19,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.fluid.core import PassVersionChecker


@@ -69,7 +70,7 @@ class ElementwiseActivationMkldnnFusePassTest_Add_Relu(
):
def set_params(self):
self.operand = paddle.add
-self.act = fluid.layers.relu
+self.act = F.relu


class ElementwiseActivationMkldnnFusePassTest_Add_Tanh(
@@ -169,7 +170,7 @@ class ElementwiseActivationMkldnnFusePassTest_Sub_Relu(
):
def set_params(self):
self.operand = paddle.subtract
-self.act = fluid.layers.relu
+self.act = F.relu


class ElementwiseActivationMkldnnFusePassTest_Sub_Tanh(
@@ -261,7 +262,7 @@ class ElementwiseActivationMkldnnFusePassTest_Mul_Relu(
):
def set_params(self):
self.operand = paddle.multiply
-self.act = fluid.layers.relu
+self.act = F.relu


class ElementwiseActivationMkldnnFusePassTest_Mul_Tanh(
@@ -19,6 +19,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F
from paddle.fluid.core import PassVersionChecker


@@ -33,7 +34,7 @@ def setUp(self):
data, num_filters=3, filter_size=3, bias_attr=False
)
softmax_out = paddle.nn.functional.softmax(conv_out_1)
-relu_out = fluid.layers.relu(conv_out_1)
+relu_out = F.relu(conv_out_1)
eltwise_out = paddle.add(softmax_out, relu_out)

self.pass_name = 'mkldnn_inplace_pass'
@@ -19,6 +19,7 @@

import paddle
import paddle.fluid as fluid
+import paddle.nn.functional as F


class TestMKLDNNMatmulFuseOp(InferencePassTest):
@@ -41,7 +42,7 @@ def make_network(self):
out = paddle.transpose(out, perm=[0, 2, 1, 3])
out = paddle.reshape(out, [0, 0, self.shape_y[0] * self.shape_y[2]])

-out = fluid.layers.relu(out)
+out = F.relu(out)
return out

def setUp(self):
@@ -107,7 +108,7 @@ def make_network(self):
out = paddle.transpose(out, perm=[0, 1, 2, 3]) # breaks pattern
out = paddle.reshape(out, [0, 0, self.shape_y[0] * self.shape_y[2]])

-out = fluid.layers.relu(out)
+out = F.relu(out)
return out

@@ -22,6 +22,7 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
import paddle.static.nn as nn
from paddle.fluid.core import AnalysisConfig, PassVersionChecker

@@ -47,7 +48,7 @@ def setUp(self):
self.fetch_list = [out]

def append_act(self, x):
-return fluid.layers.relu(x)
+return F.relu(x)

def test_check_output(self):
if core.is_compiled_with_cuda():
@@ -20,6 +20,7 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.fluid.core import AnalysisConfig, PassVersionChecker


@@ -52,7 +53,7 @@ def network():
cout = paddle.reshape(conv_out, shape=[1, 1, 12544])
elif self.conv_groups == 4:
cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
-result = fluid.layers.relu(cout)
+result = F.relu(cout)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
@@ -160,7 +161,7 @@ def network():
act=None,
)
cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
-result = fluid.layers.relu(cout)
+result = F.relu(cout)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
@@ -266,7 +267,7 @@ def network():
cout = paddle.reshape(conv_out, shape=[1, 1, 12544])
elif self.conv_groups == 4:
cout = paddle.reshape(conv_out, shape=[1, 1, 10816])
-result = fluid.layers.relu(cout)
+result = F.relu(cout)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
@@ -20,6 +20,7 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.fluid.core import AnalysisConfig, PassVersionChecker


@@ -37,7 +38,7 @@ def network():
bias_attr=False,
act="relu",
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -109,7 +110,7 @@ def network():
act=None,
)
c_out = paddle.reshape(fc_out, shape=[0, 784])
-result = fluid.layers.relu(c_out)
+result = F.relu(c_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -183,7 +184,7 @@ def network():
act=None,
)
c_out = paddle.reshape(fc_out, shape=[1, 1, 2744])
-result = fluid.layers.relu(c_out)
+result = F.relu(c_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=label_shape,
@@ -20,6 +20,7 @@
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
+import paddle.nn.functional as F
from paddle.fluid.core import AnalysisConfig, PassVersionChecker


@@ -46,7 +47,7 @@ def network():
bias_attr=False,
act=None,
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -148,7 +149,7 @@ def network():
bias_attr=False,
act=None,
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
@@ -249,7 +250,7 @@ def network():
bias_attr=False,
act=None,
)
-result = fluid.layers.relu(fc_out)
+result = F.relu(fc_out)
loss = paddle.nn.functional.cross_entropy(
input=result,
label=self.label,
