solve bug in pull_dense_worker #27918

Merged · 4 commits · Oct 14, 2020
2 changes: 1 addition & 1 deletion paddle/fluid/framework/pull_dense_worker.cc
@@ -70,7 +70,7 @@ void PullDenseWorker::Initialize(const TrainerDesc& param) {
}

void PullDenseWorker::CreatePinVar() {
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_PSLIB)
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_XPU)
// for (auto& v : dense_value_names_) {
// for (auto& name : v.second) {
for (int i = 0; i < dwp_param_.program_config(0).pull_dense_table_id_size();
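
For context, the only functional change in this file is the compile-time guard on CreatePinVar(): the pin variables are now created for XPU builds as well as CUDA builds, where previously the second branch covered PSLIB builds. A minimal sketch of the resulting guard (body elided; see the hunk above):

// Sketch only: which builds compile the body of CreatePinVar() after this PR.
void PullDenseWorker::CreatePinVar() {
#if (defined PADDLE_WITH_CUDA) || (defined PADDLE_WITH_XPU)
  // Pin variables are created here for CUDA and XPU builds;
  // PSLIB builds no longer take this branch.
  // ... (unchanged body)
#endif
}
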
44 changes: 29 additions & 15 deletions paddle/fluid/operators/slice_op.cc
@@ -29,10 +29,12 @@ class SliceOp : public framework::OperatorWithKernel {

void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true,
"Input (Input) of slice op should not be null.");
platform::errors::InvalidArgument(
"Input (Input) of slice op should not be null."));

PADDLE_ENFORCE_EQ(ctx->HasOutput("Out"), true,
"Output (Out) of slice op should not be null.");
platform::errors::InvalidArgument(
"Output (Out) of slice op should not be null."));
auto x_var_type = ctx->GetInputsVarType("Input")[0];
auto axes = ctx->Attrs().Get<std::vector<int>>("axes");
if (x_var_type == framework::proto::VarType::LOD_TENSOR_ARRAY) {
@@ -57,7 +59,8 @@ class SliceOp : public framework::OperatorWithKernel {
}
auto in_dims = ctx->GetInputDim("Input");
PADDLE_ENFORCE_LT(in_dims.size(), 7,
"The rank of input should be less than 7.");
platform::errors::InvalidArgument(
"The rank of input should be less than 7."));
framework::DDim out_dims(in_dims);

auto starts = ctx->Attrs().Get<std::vector<int>>("starts");
@@ -76,31 +79,37 @@
if (ctx->HasInputs("StartsTensorList")) {
auto StartsTensorList = ctx->Inputs("StartsTensorList");
PADDLE_ENFORCE_GT(StartsTensorList.size(), 0,
"StartsTensorList size can't be zero");
platform::errors::InvalidArgument(
"StartsTensorList size can't be zero"));
starts_size = StartsTensorList.size();
}
if (ctx->HasInputs("EndsTensorList")) {
auto EndsTensorList = ctx->Inputs("EndsTensorList");
PADDLE_ENFORCE_GT(EndsTensorList.size(), 0,
"EndsTensorList size can't be zero");
platform::errors::InvalidArgument(
"EndsTensorList size can't be zero"));
ends_size = EndsTensorList.size();
}

if (ctx->HasInput("StartsTensor") == false) {
PADDLE_ENFORCE_EQ(
starts_size, axes.size(),
"The size of starts must be equal to the size of axes.");
platform::errors::InvalidArgument(
"The size of starts must be equal to the size of axes."));
}
if (ctx->HasInput("EndsTensor") == false) {
PADDLE_ENFORCE_EQ(ends_size, axes.size(),
"The size of ends must be equal to the size of axes.");
PADDLE_ENFORCE_EQ(
ends_size, axes.size(),
platform::errors::InvalidArgument(
"The size of ends must be equal to the size of axes."));
}

int dim_value, start, end;
for (size_t i = 0; i < axes.size(); ++i) {
PADDLE_ENFORCE_LT(static_cast<int>(axes[i]), in_dims.size(),
"The index of dimension in axes must be less "
"than the size of input shape.");
platform::errors::InvalidArgument(
"The index of dimension in axes must be less "
"than the size of input shape."));
if (infer_flags[i] == -1) {
out_dims[axes[i]] = -1;
} else {
@@ -112,7 +121,8 @@ class SliceOp : public framework::OperatorWithKernel {
start = std::max(start, 0);
end = std::max(end, 0);
end = std::min(end, dim_value);
PADDLE_ENFORCE_GT(end, start, "end should greater than start");
PADDLE_ENFORCE_GT(end, start, platform::errors::InvalidArgument(
"end should greater than start"));
out_dims[axes[i]] = end - start;
}
}
@@ -122,8 +132,9 @@ class SliceOp : public framework::OperatorWithKernel {
std::vector<int> new_out_shape;
for (size_t i = 0; i < decrease_axis.size(); ++i) {
if (ctx->IsRuntime() && infer_flags[i] != -1) {
PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
"decrease dim should be 1");
PADDLE_ENFORCE_EQ(
out_dims[decrease_axis[i]], 1,
platform::errors::InvalidArgument("decrease dim should be 1"));
}
out_dims[decrease_axis[i]] = 0;
}
@@ -284,9 +295,12 @@ class SliceOpGrad : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("Input"), true, "Input should not be null");
PADDLE_ENFORCE_EQ(
ctx->HasInput("Input"), true,
platform::errors::InvalidArgument("Input should not be null"));
PADDLE_ENFORCE_EQ(ctx->HasInput(framework::GradVarName("Out")), true,
"Input(Out@GRAD) should not be null");
platform::errors::InvalidArgument(
"Input(Out@GRAD) should not be null"));
auto x_var_type = ctx->GetInputsVarType("Input")[0];
if (x_var_type == framework::proto::VarType::LOD_TENSOR_ARRAY) {
// If the var type of input is LOD_TENSOR_ARRAY,
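
Apart from the pull_dense_worker guard above, every hunk in this PR makes the same mechanical change: the bare message string passed to a PADDLE_ENFORCE_* check is wrapped in platform::errors::InvalidArgument so the failure is reported as a typed error. A before/after sketch of the pattern, taken from the rank check in SliceOp::InferShape above:

// Before: the message is a plain string literal.
PADDLE_ENFORCE_LT(in_dims.size(), 7,
                  "The rank of input should be less than 7.");

// After: the message is wrapped in a typed error object. Printf-style
// arguments can still be forwarded through it, as split_op.h below shows.
PADDLE_ENFORCE_LT(in_dims.size(), 7,
                  platform::errors::InvalidArgument(
                      "The rank of input should be less than 7."));
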
5 changes: 3 additions & 2 deletions paddle/fluid/operators/slice_op.h
@@ -191,8 +191,9 @@ class SliceKernel : public framework::OpKernel<T> {
if (decrease_axis.size() > 0) {
std::vector<int64_t> new_out_shape;
for (size_t i = 0; i < decrease_axis.size(); ++i) {
PADDLE_ENFORCE_EQ(out_dims[decrease_axis[i]], 1,
"decrease dim should be 1");
PADDLE_ENFORCE_EQ(
out_dims[decrease_axis[i]], 1,
platform::errors::InvalidArgument("decrease dim should be 1"));
out_dims[decrease_axis[i]] = 0;
}

87 changes: 57 additions & 30 deletions paddle/fluid/operators/space_to_depth_op.cc
@@ -31,51 +31,76 @@ class SpaceToDepthOp : public framework::OperatorWithKernel {

void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"),
"Input(X) of SpaceToDepthOp should not be null.");
platform::errors::InvalidArgument(
"Input(X) of SpaceToDepthOp should not be null."));
PADDLE_ENFORCE(ctx->HasOutput("Out"),
"Output(Out) of SpaceToDepthOp should not be null.");
platform::errors::InvalidArgument(
"Output(Out) of SpaceToDepthOp should not be null."));

auto x_dims = ctx->GetInputDim("X");
PADDLE_ENFORCE_EQ(x_dims.size(), 4, "input should be a 4D tensor");
PADDLE_ENFORCE_EQ(x_dims.size(), 4, platform::errors::InvalidArgument(
"input should be a 4D tensor"));
auto blocksize = ctx->Attrs().Get<int64_t>("blocksize");

PADDLE_ENFORCE_GT(blocksize, 1, "The blocksize should be Greater than 1");
PADDLE_ENFORCE_GT(blocksize, 1,
platform::errors::InvalidArgument(
"The blocksize should be Greater than 1"));
if (ctx->IsRuntime()) {
PADDLE_ENFORCE_GT(x_dims[1], 0, "input channel should be Greater than 0");
PADDLE_ENFORCE_GT(x_dims[2], 0, "input Height should be Greater than 0");
PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");

PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
"input channel should be divisible of the square of "
"SpaceToDepthOp blocksize");
PADDLE_ENFORCE_GT(x_dims[1], 0,
platform::errors::InvalidArgument(
"input channel should be Greater than 0"));
PADDLE_ENFORCE_GT(x_dims[2], 0,
platform::errors::InvalidArgument(
"input Height should be Greater than 0"));
PADDLE_ENFORCE_GT(x_dims[3], 0,
platform::errors::InvalidArgument(
"input Width should be Greater than 0"));

PADDLE_ENFORCE_EQ(
x_dims[1] % (blocksize * blocksize), 0,
platform::errors::InvalidArgument(
"input channel should be divisible of the square of "
"SpaceToDepthOp blocksize"));
PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
"input Height should be divisible of the square of "
"SpaceToDepthOp blocksize");
platform::errors::InvalidArgument(
"input Height should be divisible of the square of "
"SpaceToDepthOp blocksize"));
PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
"input Width should be divisible of the square of "
"SpaceToDepthOp blocksize");
platform::errors::InvalidArgument(
"input Width should be divisible of the square of "
"SpaceToDepthOp blocksize"));
} else {
if (x_dims[1] != -1) {
PADDLE_ENFORCE_GT(x_dims[1], 0,
"input channel should be Greater than 0");
PADDLE_ENFORCE_EQ(x_dims[1] % (blocksize * blocksize), 0,
"input channel should be divisible of the square of "
"SpaceToDepthOp blocksize");
platform::errors::InvalidArgument(
"input channel should be Greater than 0"));
PADDLE_ENFORCE_EQ(
x_dims[1] % (blocksize * blocksize), 0,
platform::errors::InvalidArgument(
"input channel should be divisible of the square of "
"SpaceToDepthOp blocksize"));
}
if (x_dims[2] != -1) {
PADDLE_ENFORCE_GT(x_dims[2], 0,
"input Height should be Greater than 0");
PADDLE_ENFORCE_EQ(x_dims[2] % (blocksize), 0,
"input Height should be divisible of the square of "
"SpaceToDepthOp blocksize");
platform::errors::InvalidArgument(
"input Height should be Greater than 0"));
PADDLE_ENFORCE_EQ(
x_dims[2] % (blocksize), 0,
platform::errors::InvalidArgument(
"input Height should be divisible of the square of "
"SpaceToDepthOp blocksize"));
}

if (x_dims[3] != -1) {
PADDLE_ENFORCE_GT(x_dims[3], 0, "input Width should be Greater than 0");

PADDLE_ENFORCE_EQ(x_dims[3] % (blocksize), 0,
"input Width should be divisible of the square of "
"SpaceToDepthOp blocksize");
PADDLE_ENFORCE_GT(x_dims[3], 0,
platform::errors::InvalidArgument(
"input Width should be Greater than 0"));

PADDLE_ENFORCE_EQ(
x_dims[3] % (blocksize), 0,
platform::errors::InvalidArgument(
"input Width should be divisible of the square of "
"SpaceToDepthOp blocksize"));
}
}

@@ -156,9 +181,11 @@ class SpaceToDepthGradOp : public framework::OperatorWithKernel {
using framework::OperatorWithKernel::OperatorWithKernel;

void InferShape(framework::InferShapeContext* ctx) const override {
PADDLE_ENFORCE(ctx->HasInput("X"), "Input(X) shouldn't be null.");
PADDLE_ENFORCE(ctx->HasInput("X"), platform::errors::InvalidArgument(
"Input(X) shouldn't be null."));
PADDLE_ENFORCE(ctx->HasInput(framework::GradVarName("Out")),
"Input(Out@GRAD) shouldn't be null.");
platform::errors::InvalidArgument(
"Input(Out@GRAD) shouldn't be null."));
ctx->SetOutputDim(framework::GradVarName("X"), ctx->GetInputDim("X"));
}

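
One structural detail in SpaceToDepthOp::InferShape above: at graph-construction time an unknown dimension is encoded as -1, so outside of ctx->IsRuntime() each blocksize-divisibility check is guarded by an x_dims[i] != -1 test, while at runtime every dimension is concrete and the checks run unconditionally. A condensed sketch of that structure for a single dimension (messages shortened; not a verbatim excerpt):

if (ctx->IsRuntime()) {
  // All shapes are concrete at runtime, so validate directly.
  PADDLE_ENFORCE_EQ(x_dims[2] % blocksize, 0,
                    platform::errors::InvalidArgument("..."));
} else if (x_dims[2] != -1) {
  // At compile time, -1 marks a dimension that is not yet known;
  // only validate dimensions that already have a value.
  PADDLE_ENFORCE_EQ(x_dims[2] % blocksize, 0,
                    platform::errors::InvalidArgument("..."));
}
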
13 changes: 8 additions & 5 deletions paddle/fluid/operators/split_op.cc
@@ -25,9 +25,11 @@ class SplitOp : public framework::OperatorWithKernel {

void InferShape(framework::InferShapeContext *ctx) const override {
PADDLE_ENFORCE_EQ(ctx->HasInput("X"), true,
"Input(X) of SplitOp should not be null.");
platform::errors::InvalidArgument(
"Input(X) of SplitOp should not be null."));
PADDLE_ENFORCE_GE(ctx->Outputs("Out").size(), 1UL,
"Outputs(Out) of SplitOp should not be empty.");
platform::errors::InvalidArgument(
"Outputs(Out) of SplitOp should not be empty."));
auto in_dims = ctx->GetInputDim("X");
auto outs_names = ctx->Outputs("Out");
size_t axis = static_cast<size_t>(ctx->Attrs().Get<int>("axis"));
@@ -37,9 +39,10 @@ class SplitOp : public framework::OperatorWithKernel {
const size_t outs_number = outs_names.size();

if (sections.size() > 0) {
PADDLE_ENFORCE_EQ(sections.size(), outs_number,
"tensor split sections size "
"should be equal to output size.");
PADDLE_ENFORCE_EQ(
sections.size(), outs_number,
platform::errors::InvalidArgument("tensor split sections size "
"should be equal to output size."));
}

if (ctx->HasInput("AxisTensor")) {
48 changes: 28 additions & 20 deletions paddle/fluid/operators/split_op.h
@@ -33,12 +33,14 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
int64_t input_axis_dim = in_dims[axis];
if (num > 0) {
if (is_runtime || input_axis_dim > 0) {
PADDLE_ENFORCE_EQ(input_axis_dim % num, 0,
"The input's size along the split dimension "
"must be evenly divisible by Attr(num_or_sections). "
"But received Attr(num_or_sections) "
"= %d, input(X)'s shape = [%s], Attr(dim) = %d.",
num, in_dims, axis);
PADDLE_ENFORCE_EQ(
input_axis_dim % num, 0,
platform::errors::InvalidArgument(
"The input's size along the split dimension "
"must be evenly divisible by Attr(num_or_sections). "
"But received Attr(num_or_sections) "
"= %d, input(X)'s shape = [%s], Attr(dim) = %d.",
num, in_dims, axis));
size_t out_axis_dim = input_axis_dim / num;

for (auto& out_dim : outs_dims) {
@@ -64,11 +66,13 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
}

if (each_section_is_known) {
PADDLE_ENFORCE_LE(num_of_unk, 1,
"Only one dimension value of Attr(num_or_sections) "
"in SplitOp can be -1. "
"But received Attr(num_or_sections) = [%s].",
framework::make_ddim(sections));
PADDLE_ENFORCE_LE(
num_of_unk, 1,
platform::errors::InvalidArgument(
"Only one dimension value of Attr(num_or_sections) "
"in SplitOp can be -1. "
"But received Attr(num_or_sections) = [%s].",
framework::make_ddim(sections)));
}

if (unk_dim_idx != -1) {
Expand All @@ -77,21 +81,25 @@ static inline std::vector<framework::DDim> UpdateOutsDims(
// the following check will fail.
PADDLE_ENFORCE_LT(
sum_of_section, input_axis_dim,
"Sum of Attr(num_or_sections) other than unknown section "
"must be less than the input's size "
"along the split dimension. But received Attr(num_or_sections) "
"= [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
framework::make_ddim(sections), in_dims, axis);
platform::errors::InvalidArgument(
"Sum of Attr(num_or_sections) other than unknown section "
"must be less than the input's "
"size "
"along the split dimension. But received Attr(num_or_sections) "
"= [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
framework::make_ddim(sections), in_dims, axis));
if (each_section_is_known) {
sections[unk_dim_idx] = input_axis_dim - sum_of_section;
}
} else {
PADDLE_ENFORCE_EQ(
sum_of_section, input_axis_dim,
"Sum of Attr(num_or_sections) must be equal to the input's size "
"along the split dimension. But received Attr(num_or_sections)"
" = [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
framework::make_ddim(sections), in_dims, axis);
platform::errors::InvalidArgument(
"Sum of Attr(num_or_sections) must be equal to the input's "
"size "
"along the split dimension. But received Attr(num_or_sections)"
" = [%s], input(X)'s shape = [%s], Attr(dim) = %d.",
framework::make_ddim(sections), in_dims, axis));
}
}
for (int i = 0; i < outs_number; ++i) {