From 6db2bbabd8e40c485088523f352572787ac21e03 Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Mon, 29 Nov 2021 05:38:50 +0000 Subject: [PATCH 1/2] Refactored eager legacy namespace --- .../auto_code_generator/eager_generator.cc | 8 +++---- paddle/fluid/eager/legacy/amp_auto_cast.cc | 22 +++++++++++-------- paddle/fluid/eager/legacy/amp_auto_cast.h | 2 ++ paddle/fluid/eager/legacy/execution_context.h | 2 ++ .../fluid/eager/legacy/infer_shape_context.h | 2 ++ .../eager/legacy/infer_var_type_context.h | 2 ++ paddle/fluid/eager/legacy/op_runner.cc | 13 ++++++----- paddle/fluid/eager/legacy/op_runner.h | 5 ++++- .../fluid/eager/legacy/prepared_operator.cc | 9 +++++--- paddle/fluid/eager/legacy/prepared_operator.h | 2 ++ paddle/fluid/eager/legacy/tensor_helper.cc | 3 +++ paddle/fluid/eager/legacy/tensor_helper.h | 6 ++++- paddle/fluid/eager/legacy/type_def.h | 5 +++++ .../fluid/framework/details/nan_inf_utils.h | 6 ++--- 14 files changed, 61 insertions(+), 26 deletions(-) diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc index bc4775f218916..c0714775da852 100644 --- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc +++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc @@ -779,7 +779,7 @@ static std::pair GenerateForwardFunctionContents( ,ConstructDuplicableOutput(Out1Num)} }; // According to op_proto->attrs() - egr::RunOp("op_type", ins, outs, attr_map, + egr::legacy::RunOp("op_type", ins, outs, attr_map, Controller.Instance().GetExpectedPlace(), {}); // According to fwd_outputs_names @@ -894,7 +894,7 @@ static std::pair GenerateForwardFunctionContents( const char* FWD_TRACE_OP_TEMPLATE = " paddle::framework::AttributeMap attrs = attr_map;\n" " paddle::framework::AttributeMap default_attrs;\n" - " egr::RunOp(\"%s\", ins, outs, attrs, \n" + " egr::legacy::RunOp(\"%s\", ins, outs, attrs, \n" " egr::Controller::Instance().GetExpectedPlace(),\n" " &default_attrs, true, {});\n"; std::string trace_op_str = @@ -1052,7 +1052,7 @@ static std::string GenerateGradNodeCCContents( // Visit each OpBase for(auto iter = "grad_node->begin()"; iter < "grad_node->end()"; iter++) { // Simply pass entire attribute map to kernels - egr::RunOp("iter->Type()", ins, outs, this->attr_map_, + egr::legacy::RunOp("iter->Type()", ins, outs, this->attr_map_, egr::Controller::Instance().ExpectedPlace(), false, {}); } @@ -1180,7 +1180,7 @@ static std::string GenerateGradNodeCCContents( " // Pass the entire attribute map to TraceOp\n" " // The underlying kernel will pickup whatever attribute they need " "at runtime\n" - " egr::RunOp(\"%s\", ins, outs, this->attr_map_,\n" + " egr::legacy::RunOp(\"%s\", ins, outs, this->attr_map_,\n" " egr::Controller::Instance().GetExpectedPlace(),\n" " &this->default_attr_map_, false, {});\n"; trace_opbase_str = paddle::string::Sprintf(TRACE_OP_TEMPLATE, op_base_type); diff --git a/paddle/fluid/eager/legacy/amp_auto_cast.cc b/paddle/fluid/eager/legacy/amp_auto_cast.cc index b86cb7a48f616..9724938afc82f 100644 --- a/paddle/fluid/eager/legacy/amp_auto_cast.cc +++ b/paddle/fluid/eager/legacy/amp_auto_cast.cc @@ -20,6 +20,7 @@ #include "paddle/fluid/framework/operator.h" namespace egr { +namespace legacy { AmpOperators::AmpOperators() : allow_ops_(new std::unordered_set()), @@ -85,12 +86,12 @@ std::ostream& operator<<(std::ostream& os, AmpOperators& ops) { inline std::string GetDtypeStr( const std::shared_ptr& tensor) { return paddle::framework::DataTypeToString( - 
egr::GetDtypeFromVar(tensor->Var())); + egr::legacy::GetDtypeFromVar(tensor->Var())); } inline bool NeedCast(const std::shared_ptr& tensor) { - auto place = egr::GetPlaceFromVar(tensor->Var()); - auto data_type = egr::GetDtypeFromVar(tensor->Var()); + auto place = egr::legacy::GetPlaceFromVar(tensor->Var()); + auto data_type = egr::legacy::GetDtypeFromVar(tensor->Var()); if (paddle::platform::is_gpu_place(place) || paddle::platform::is_cuda_pinned_place(place) || paddle::platform::is_xpu_place(place)) { @@ -109,7 +110,7 @@ static inline std::shared_ptr CastToType( const std::shared_ptr& tensor, const paddle::framework::proto::VarType::Type dst_type) { NameTensorMap ins = {{"X", {tensor}}}; - auto in_data_type = egr::GetDtypeFromVar(tensor->Var()); + auto in_data_type = egr::legacy::GetDtypeFromVar(tensor->Var()); paddle::framework::AttributeMap attrs = {{"in_dtype", in_data_type}, {"out_dtype", dst_type}}; auto out = std::shared_ptr(new egr::EagerTensor()); @@ -127,7 +128,8 @@ static inline std::shared_ptr CastToType( static inline std::shared_ptr CastToFP16( const std::shared_ptr& tensor) { auto dst_type = paddle::framework::proto::VarType::FP16; - if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) { + if (NeedCast(tensor) && + (egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) { return CastToType(tensor, dst_type); } return tensor; @@ -136,7 +138,8 @@ static inline std::shared_ptr CastToFP16( static inline std::shared_ptr CastToFP32( const std::shared_ptr& tensor) { auto dst_type = paddle::framework::proto::VarType::FP32; - if (NeedCast(tensor) && (egr::GetDtypeFromVar(tensor->Var()) != dst_type)) { + if (NeedCast(tensor) && + (egr::legacy::GetDtypeFromVar(tensor->Var()) != dst_type)) { return CastToType(tensor, dst_type); } return tensor; @@ -147,9 +150,9 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType( auto dst_type = paddle::framework::proto::VarType::FP16; for (const auto& pair : ins) { for (const auto& tensor : pair.second) { - if (egr::GetDtypeFromVar(tensor->Var()) == + if (egr::legacy::GetDtypeFromVar(tensor->Var()) == paddle::framework::proto::VarType::FP32) { - dst_type = egr::GetDtypeFromVar(tensor->Var()); + dst_type = egr::legacy::GetDtypeFromVar(tensor->Var()); break; } } @@ -160,7 +163,7 @@ static inline paddle::framework::proto::VarType::Type GetPromoteType( if (op_type == "moving_average_abs_max_scale") { for (const auto& pair : ins) { if (pair.first == "X" && - egr::GetDtypeFromVar(pair.second.front()->Var()) == + egr::legacy::GetDtypeFromVar(pair.second.front()->Var()) == paddle::framework::proto::VarType::FP16) { dst_type = paddle::framework::proto::VarType::FP16; } @@ -255,4 +258,5 @@ NameTensorMap CastPureFp16Inputs(const std::string& op_type, return new_ins; } +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/amp_auto_cast.h b/paddle/fluid/eager/legacy/amp_auto_cast.h index c4c1b6d352f1d..ab5e4ac1453a0 100644 --- a/paddle/fluid/eager/legacy/amp_auto_cast.h +++ b/paddle/fluid/eager/legacy/amp_auto_cast.h @@ -24,6 +24,7 @@ #include "paddle/fluid/eager/legacy/type_def.h" namespace egr { +namespace legacy { // NOTE(zhiqiu): only O1 and O2 are valid now enum class AmpLevel { @@ -92,4 +93,5 @@ NameTensorMap AutoCastInputs(const std::string& op_type, NameTensorMap CastPureFp16Inputs(const std::string& op_type, const NameTensorMap& ins); +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/execution_context.h b/paddle/fluid/eager/legacy/execution_context.h 
index ad252ecb89a19..e51b6bf5417c2 100644 --- a/paddle/fluid/eager/legacy/execution_context.h +++ b/paddle/fluid/eager/legacy/execution_context.h @@ -22,6 +22,7 @@ #include "paddle/fluid/framework/type_defs.h" #include "paddle/fluid/framework/variable.h" namespace egr { +namespace legacy { class EagerExecutionContext : public paddle::framework::ExecutionContext { using Variable = paddle::framework::Variable; @@ -209,4 +210,5 @@ class EagerExecutionContext : public paddle::framework::ExecutionContext { const paddle::framework::AttributeMap& default_attrs_; }; +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/infer_shape_context.h b/paddle/fluid/eager/legacy/infer_shape_context.h index 993532b99b4aa..bf8ebed36a749 100644 --- a/paddle/fluid/eager/legacy/infer_shape_context.h +++ b/paddle/fluid/eager/legacy/infer_shape_context.h @@ -25,6 +25,7 @@ #include "paddle/fluid/framework/type_defs.h" #include "paddle/fluid/framework/var_type.h" namespace egr { +namespace legacy { class EagerInferShapeContext : public paddle::framework::InferShapeContext { using DDim = paddle::framework::DDim; @@ -401,4 +402,5 @@ class EagerInferShapeContext : public paddle::framework::InferShapeContext { const std::string op_type_; }; +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/infer_var_type_context.h b/paddle/fluid/eager/legacy/infer_var_type_context.h index 2bd156df40c12..8e7bbef37d805 100644 --- a/paddle/fluid/eager/legacy/infer_var_type_context.h +++ b/paddle/fluid/eager/legacy/infer_var_type_context.h @@ -29,6 +29,7 @@ #include "paddle/pten/include/core.h" namespace egr { +namespace legacy { // infer var type context for imperative mode class TensorRuntimeInferVarTypeContext @@ -255,4 +256,5 @@ class TensorRuntimeInferVarTypeContext const paddle::framework::AttributeMap& default_attrs_; }; +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/op_runner.cc b/paddle/fluid/eager/legacy/op_runner.cc index c8858cbceca2d..2cd86ecfd8ba7 100644 --- a/paddle/fluid/eager/legacy/op_runner.cc +++ b/paddle/fluid/eager/legacy/op_runner.cc @@ -30,6 +30,7 @@ DECLARE_string(tracer_mkldnn_ops_on); DECLARE_string(tracer_mkldnn_ops_off); namespace egr { +namespace legacy { void OpRunImpl(const paddle::framework::OperatorBase& op, const NameTensorMap& ins, const NameTensorMap& outs, @@ -43,8 +44,8 @@ void OpRunImpl(const paddle::framework::OperatorBase& op, "Only support operator with kernel in Dygraph mode.")); auto& info = op.Info(); if (info.infer_var_type_) { - egr::TensorRuntimeInferVarTypeContext infer_var_type_ctx(ins, outs, attrs, - default_attrs); + egr::legacy::TensorRuntimeInferVarTypeContext infer_var_type_ctx( + ins, outs, attrs, default_attrs); info.infer_var_type_(&infer_var_type_ctx); } @@ -76,10 +77,10 @@ void OpRunImpl(const paddle::framework::OperatorBase& op, * after the execution of op, but the original input is directly * overwritten in the previous dynamic graph implemention. 
*/ - auto prepared_op = egr::PreparedOp::Prepare(ins, outs, *op_kernel, place, - attrs, default_attrs); + auto prepared_op = egr::legacy::PreparedOp::Prepare( + ins, outs, *op_kernel, place, attrs, default_attrs); auto tmp_ins_ptr = - egr::PrepareData(*op_kernel, ins, prepared_op.kernel_type()); + egr::legacy::PrepareData(*op_kernel, ins, prepared_op.kernel_type()); if (tmp_ins_ptr == nullptr) { prepared_op.Run(ins, outs, attrs, default_attrs); } else { @@ -188,4 +189,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins, // program_desc_tracer_->InsertOp(type, new_ins, outs, attrs); // } } + +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/op_runner.h b/paddle/fluid/eager/legacy/op_runner.h index 84745dfe6d737..33d557f0b8518 100644 --- a/paddle/fluid/eager/legacy/op_runner.h +++ b/paddle/fluid/eager/legacy/op_runner.h @@ -19,6 +19,7 @@ #include "paddle/pten/core/tensor_meta.h" namespace egr { +namespace legacy { void RunOp(const std::string& type, const NameTensorMap& ins, const NameTensorMap& outs, paddle::framework::AttributeMap attrs, @@ -26,4 +27,6 @@ void RunOp(const std::string& type, const NameTensorMap& ins, paddle::framework::AttributeMap* default_attrs, bool override_default_attr_map, const std::map& inplace_map = {}); -} + +} // namespace legacy +} // namespace egr diff --git a/paddle/fluid/eager/legacy/prepared_operator.cc b/paddle/fluid/eager/legacy/prepared_operator.cc index 3db154b70a11e..547ee86967491 100644 --- a/paddle/fluid/eager/legacy/prepared_operator.cc +++ b/paddle/fluid/eager/legacy/prepared_operator.cc @@ -26,6 +26,7 @@ DECLARE_bool(check_nan_inf); DECLARE_bool(run_pten_kernel); namespace egr { +namespace legacy { const paddle::framework::Tensor* GetTensorFromVar( const paddle::framework::Variable& var) { @@ -96,9 +97,9 @@ PreparedOp PrepareImpl(const NameTensorMap& ins, const NameTensorMap& outs, #endif // 1. 
get expected kernel key - auto dygraph_exe_ctx = - egr::EagerExecutionContext(op, paddle::framework::Scope(), *dev_ctx, ctx, - ins, outs, attrs, default_attrs); + auto dygraph_exe_ctx = egr::legacy::EagerExecutionContext( + op, paddle::framework::Scope(), *dev_ctx, ctx, ins, outs, attrs, + default_attrs); auto expected_kernel_key = op.GetExpectedKernelType(dygraph_exe_ctx); VLOG(3) << "expected_kernel_key:" << expected_kernel_key; @@ -251,4 +252,6 @@ std::shared_ptr PrepareData( } return tmp_ins_ptr; } + +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/prepared_operator.h b/paddle/fluid/eager/legacy/prepared_operator.h index bacb867095cb9..9ba186b14e3b3 100644 --- a/paddle/fluid/eager/legacy/prepared_operator.h +++ b/paddle/fluid/eager/legacy/prepared_operator.h @@ -40,6 +40,7 @@ class DeviceContext; } // namespace paddle namespace egr { +namespace legacy { const paddle::framework::Tensor* GetTensorFromVar( const paddle::framework::Variable& var); @@ -79,4 +80,5 @@ class PreparedOp { paddle::platform::DeviceContext* dev_ctx_; }; +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/tensor_helper.cc b/paddle/fluid/eager/legacy/tensor_helper.cc index d98b75a570857..97cac5a340419 100644 --- a/paddle/fluid/eager/legacy/tensor_helper.cc +++ b/paddle/fluid/eager/legacy/tensor_helper.cc @@ -25,6 +25,7 @@ #include "paddle/fluid/platform/place.h" namespace egr { +namespace legacy { void InitializeVariable(paddle::framework::Variable *var, paddle::framework::proto::VarType::Type var_type) { @@ -108,4 +109,6 @@ const paddle::platform::Place &GetPlaceFromVar( paddle::framework::ToTypeName(var.Type()))); } } + +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/eager/legacy/tensor_helper.h b/paddle/fluid/eager/legacy/tensor_helper.h index cc87b4c39c9c3..f87ab70c93686 100644 --- a/paddle/fluid/eager/legacy/tensor_helper.h +++ b/paddle/fluid/eager/legacy/tensor_helper.h @@ -19,6 +19,8 @@ #include "paddle/pten/api/all.h" #include "paddle/pten/include/core.h" namespace egr { +namespace legacy { + void InitializeVariable(paddle::framework::Variable* var, paddle::framework::proto::VarType::Type var_type); paddle::framework::proto::VarType::Type GetDtypeFromVar( @@ -27,4 +29,6 @@ const paddle::platform::Place& GetPlaceFromVar( const paddle::framework::Variable& var); void CopyVariable(const paddle::framework::Variable& src_var, paddle::framework::Variable* dst_var); -} + +} // namespace legacy +} // namespace egr diff --git a/paddle/fluid/eager/legacy/type_def.h b/paddle/fluid/eager/legacy/type_def.h index 2d4f723e6b92b..c209c48e384aa 100644 --- a/paddle/fluid/eager/legacy/type_def.h +++ b/paddle/fluid/eager/legacy/type_def.h @@ -22,6 +22,9 @@ namespace egr { class EagerTensor; + +namespace legacy { + namespace details { template struct NameVarMapTrait {}; @@ -36,4 +39,6 @@ template using NameMap = typename details::NameVarMapTrait::Type; using NameTensorMap = NameMap; + +} // namespace legacy } // namespace egr diff --git a/paddle/fluid/framework/details/nan_inf_utils.h b/paddle/fluid/framework/details/nan_inf_utils.h index d458e88a5619a..be5ffef27caae 100644 --- a/paddle/fluid/framework/details/nan_inf_utils.h +++ b/paddle/fluid/framework/details/nan_inf_utils.h @@ -55,9 +55,9 @@ void CheckOpHasNanOrInfInDygraph(const std::string& op_type, } template -static void CheckOpHasNanOrInfInEager(const std::string& op_type, - const egr::NameMap& op_outs, - platform::Place place) { +static void CheckOpHasNanOrInfInEager( + const std::string& 
op_type, const egr::legacy::NameMap& op_outs, + platform::Place place) { for (const auto& pair : op_outs) { for (const auto& tensor : pair.second) { auto* var = tensor->MutableVar(); From 1078679232403066cbe328e3138233244fbff73a Mon Sep 17 00:00:00 2001 From: jim19930609 Date: Mon, 29 Nov 2021 06:27:14 +0000 Subject: [PATCH 2/2] Fixed namespace issues --- paddle/fluid/eager/api/utils/hook_utils.cc | 2 + paddle/fluid/eager/api/utils/hook_utils.h | 2 + paddle/fluid/eager/api/utils/tensor_utils.cc | 2 + paddle/fluid/eager/api/utils/tensor_utils.h | 2 + .../performance_tests/benchmark_eager_cpu.cc | 4 +- .../performance_tests/benchmark_eager_cuda.cc | 4 +- .../eager/tests/task_tests/backward_test.cc | 46 ++++---- .../cross_batch_accumulation_test.cc | 22 ++-- .../tests/task_tests/eager_utils_test.cc | 14 +-- .../tests/task_tests/forward_autograd_test.cc | 31 +++--- .../tests/task_tests/fwd_bwd_joint_test.cc | 101 +++++++++--------- .../eager/tests/task_tests/generated_test.cc | 33 +++--- .../fluid/eager/tests/task_tests/hook_test.cc | 46 ++++---- .../tests/task_tests/tensor_utils_test.cc | 21 ++-- 14 files changed, 158 insertions(+), 172 deletions(-) diff --git a/paddle/fluid/eager/api/utils/hook_utils.cc b/paddle/fluid/eager/api/utils/hook_utils.cc index 7f85d014fa842..85ff6687e0dbe 100644 --- a/paddle/fluid/eager/api/utils/hook_utils.cc +++ b/paddle/fluid/eager/api/utils/hook_utils.cc @@ -20,6 +20,7 @@ #include "paddle/pten/core/dense_tensor.h" namespace egr { +namespace egr_utils_api { void RegisterGradientHookForTensor( const egr::EagerTensor& tensor, @@ -90,4 +91,5 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) { } } +} // namespace egr_utils_api } // namespace egr diff --git a/paddle/fluid/eager/api/utils/hook_utils.h b/paddle/fluid/eager/api/utils/hook_utils.h index bf320f0b15d4a..7e4faa5a2c701 100644 --- a/paddle/fluid/eager/api/utils/hook_utils.h +++ b/paddle/fluid/eager/api/utils/hook_utils.h @@ -18,6 +18,7 @@ #include "paddle/fluid/eager/grad_node_info.h" #include "paddle/pten/api/all.h" namespace egr { +namespace egr_utils_api { void RegisterGradientHookForTensor( const egr::EagerTensor& tensor, @@ -27,4 +28,5 @@ void RegisterReduceHookForTensor(const egr::EagerTensor& tensor, const std::function& hook); void RetainGradForTensor(const egr::EagerTensor& tensor); +} // namespace egr_utils_api } // namespace egr diff --git a/paddle/fluid/eager/api/utils/tensor_utils.cc b/paddle/fluid/eager/api/utils/tensor_utils.cc index 9dbb308a2c906..ad6c34b7cf86c 100644 --- a/paddle/fluid/eager/api/utils/tensor_utils.cc +++ b/paddle/fluid/eager/api/utils/tensor_utils.cc @@ -26,6 +26,7 @@ #include "paddle/fluid/framework/variable.h" namespace egr { +namespace egr_utils_api { bool IsLeafTensor(const egr::EagerTensor& target) { std::shared_ptr grad_node = EagerUtils::grad_node(target); @@ -58,4 +59,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim, return out; } +} // namespace egr_utils_api } // namespace egr diff --git a/paddle/fluid/eager/api/utils/tensor_utils.h b/paddle/fluid/eager/api/utils/tensor_utils.h index a0d8caf3cb307..b3c4b59682320 100644 --- a/paddle/fluid/eager/api/utils/tensor_utils.h +++ b/paddle/fluid/eager/api/utils/tensor_utils.h @@ -18,6 +18,7 @@ #include "paddle/pten/api/all.h" namespace egr { +namespace egr_utils_api { // If and only if the tensor holds an AccumulationNode // Then it's treated as a leaf tensor @@ -29,4 +30,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim, const pten::DataLayout& layout, float value, bool is_leaf = 
true); +} // namespace egr_utils_api } // namespace egr diff --git a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc index 0a84f3b523aee..cd60ccb259cde 100644 --- a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc +++ b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cpu.cc @@ -32,8 +32,8 @@ #include "gperftools/profiler.h" #endif -// TODO(jiabin): remove nolint here!!! -using namespace egr; // NOLINT +using namespace egr; // NOLINT +using namespace egr_utils_api; // NOLINT // Disable pten path DECLARE_bool(run_pten_kernel); diff --git a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc index b373802c79eb4..119629116f0a9 100644 --- a/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc +++ b/paddle/fluid/eager/tests/performance_tests/benchmark_eager_cuda.cc @@ -31,8 +31,8 @@ #include "gperftools/profiler.h" #endif -// TODO(jiabin): remove nolint here!!! -using namespace egr; // NOLINT +using namespace egr; // NOLINT +using namespace egr_utils_api; // NOLINT DECLARE_bool(run_pten_kernel); diff --git a/paddle/fluid/eager/tests/task_tests/backward_test.cc b/paddle/fluid/eager/tests/task_tests/backward_test.cc index d63cff23ba9c8..0ec86b7cc360c 100644 --- a/paddle/fluid/eager/tests/task_tests/backward_test.cc +++ b/paddle/fluid/eager/tests/task_tests/backward_test.cc @@ -30,19 +30,17 @@ #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/core/tensor_meta.h" -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { TEST(Backward, SingleNodeEmptyGrad) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor target_tensor = CreateTensorWithValue( + egr::EagerTensor target_tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); @@ -67,7 +65,7 @@ TEST(Backward, SingleNodeEmptyGrad) { std::dynamic_pointer_cast(acc_node_ptr)); auto_grad_meta1->SetSingleOutRankWithSlot(0, 0); - egr::RetainGradForTensor(leaf_tensor); + egr_utils_api::RetainGradForTensor(leaf_tensor); // Connect Node0 -> AccumulationNode via Edge auto meta = egr::AutogradMeta(); @@ -80,26 +78,26 @@ TEST(Backward, SingleNodeEmptyGrad) { RunBackward(outs, {}); // Check Output Value - CompareGradTensorWithValue(leaf_tensor, 5.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 5.0); } TEST(Backward, SingleNodeCustomGrad) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(tensor)); std::vector grad_tensors; // Create Grad Tensor - egr::EagerTensor grad_tensor = CreateTensorWithValue( + egr::EagerTensor grad_tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), 
pten::DataType::FLOAT32, pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/); grad_tensors.emplace_back(std::move(grad_tensor)); @@ -128,7 +126,7 @@ TEST(Backward, SingleNodeCustomGrad) { std::dynamic_pointer_cast(acc_node_ptr)); auto_grad_meta1->SetSingleOutRankWithSlot(0, 0); - egr::RetainGradForTensor(leaf_tensor); + egr_utils_api::RetainGradForTensor(leaf_tensor); // Connect Node0 -> AccumulationNode via Edge auto meta = egr::AutogradMeta(); @@ -141,7 +139,7 @@ TEST(Backward, SingleNodeCustomGrad) { RunBackward(target_tensors, grad_tensors); // Check Output Value - CompareGradTensorWithValue(leaf_tensor, 50.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 50.0); } /* @@ -153,14 +151,14 @@ Node0 */ TEST(Backward, LinearNodes) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(tensor)); @@ -202,7 +200,7 @@ TEST(Backward, LinearNodes) { std::dynamic_pointer_cast(acc_node_ptr)); auto_grad_meta1->SetSingleOutRankWithSlot(0, 0); - egr::RetainGradForTensor(leaf_tensor); + egr_utils_api::RetainGradForTensor(leaf_tensor); // Connect Node1 -> AccumulationNode via Edge auto meta1 = egr::AutogradMeta(); @@ -215,7 +213,7 @@ TEST(Backward, LinearNodes) { RunBackward(target_tensors, {}); // Check Output Value - CompareGradTensorWithValue(leaf_tensor, 50.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 50.0); } /* @@ -227,17 +225,17 @@ Node0 Node1 */ TEST(Backward, WithAccumulation) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor std::vector target_tensors; - egr::EagerTensor tensor0 = CreateTensorWithValue( + egr::EagerTensor tensor0 = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); - egr::EagerTensor tensor1 = CreateTensorWithValue( + egr::EagerTensor tensor1 = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(tensor0)); @@ -245,10 +243,10 @@ TEST(Backward, WithAccumulation) { // Create Grad Tensor std::vector grad_tensors; - egr::EagerTensor grad_tensor0 = CreateTensorWithValue( + egr::EagerTensor grad_tensor0 = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/); - egr::EagerTensor grad_tensor1 = CreateTensorWithValue( + egr::EagerTensor grad_tensor1 = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/); grad_tensors.emplace_back(std::move(grad_tensor0)); @@ -303,7 +301,7 @@ TEST(Backward, WithAccumulation) { std::dynamic_pointer_cast(acc_node_ptr)); auto_grad_meta2->SetSingleOutRankWithSlot(0, 0); - egr::RetainGradForTensor(leaf_tensor); 
+ egr_utils_api::RetainGradForTensor(leaf_tensor); // Connect Node2 -> AccumulationNode via Edge auto meta2 = egr::AutogradMeta(); @@ -314,7 +312,7 @@ TEST(Backward, WithAccumulation) { RunBackward(target_tensors, grad_tensors); - CompareGradTensorWithValue(leaf_tensor, 2500.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 2500.0); } -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc b/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc index e1e138cdee8ba..52e10b2b1b8a0 100644 --- a/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc +++ b/paddle/fluid/eager/tests/task_tests/cross_batch_accumulation_test.cc @@ -31,17 +31,15 @@ #include "paddle/fluid/eager/tests/test_utils.h" -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { TEST(CrossBatchAccumulation, SingleScaleNode) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(tensor)); @@ -60,7 +58,7 @@ TEST(CrossBatchAccumulation, SingleScaleNode) { auto_grad_meta->SetGradNode( std::dynamic_pointer_cast(scale_node_ptr)); auto_grad_meta->SetSingleOutRankWithSlot(0, 0); - RetainGradForTensor(target_tensor); // result: 1.0 + egr_utils_api::RetainGradForTensor(target_tensor); // result: 1.0 auto meta = AutogradMeta(); meta.SetSingleOutRankWithSlot(0, 0); @@ -71,18 +69,18 @@ TEST(CrossBatchAccumulation, SingleScaleNode) { auto_grad_meta1->SetGradNode( std::dynamic_pointer_cast(acc_node_ptr)); auto_grad_meta1->SetSingleOutRankWithSlot(0, 0); - RetainGradForTensor(leaf_tensor); + egr_utils_api::RetainGradForTensor(leaf_tensor); } RunBackward(target_tensors, {}); - CompareGradTensorWithValue(target_tensor, 1.0); - CompareGradTensorWithValue(leaf_tensor, 5.0); + eager_test::CompareGradTensorWithValue(target_tensor, 1.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 5.0); RunBackward(target_tensors, {}); - CompareGradTensorWithValue(target_tensor, 1.0); - CompareGradTensorWithValue(leaf_tensor, 10.0); + eager_test::CompareGradTensorWithValue(target_tensor, 1.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 10.0); } -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc b/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc index 4d93f0188a746..c7c27dcc1d150 100644 --- a/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc +++ b/paddle/fluid/eager/tests/task_tests/eager_utils_test.cc @@ -24,10 +24,7 @@ #include "paddle/pten/api/lib/utils/allocator.h" -// TODO(jiabin): remove nolint here!!! 
-using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { TEST(EagerUtils, AutoGradMeta) { // Construct Eager Tensor @@ -167,7 +164,7 @@ TEST(EagerUtils, PassStopGradient) { TEST(EagerUtils, SyncToVarsSingle) { paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4}); - auto tensor = eager_test::CreateTestCPUTensor(5.0f, ddim); + auto tensor = CreateTestCPUTensor(5.0f, ddim); std::vector> var_bases = egr::EagerUtils::SyncToVars(tensor); @@ -185,9 +182,8 @@ TEST(EagerUtils, SyncToVarsSingle) { TEST(EagerUtils, SyncToVarsMultiple) { paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4}); - std::vector tensors = { - eager_test::CreateTestCPUTensor(1.0f, ddim), - eager_test::CreateTestCPUTensor(2.0f, ddim)}; + std::vector tensors = {CreateTestCPUTensor(1.0f, ddim), + CreateTestCPUTensor(2.0f, ddim)}; std::vector> var_bases = egr::EagerUtils::SyncToVars(tensors); @@ -280,4 +276,4 @@ TEST(EagerUtils, ConstructDuplicableOutput) { CHECK(outs[0]->initialized() == false); } -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc b/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc index 6e23226cde432..205f231eceeed 100644 --- a/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc +++ b/paddle/fluid/eager/tests/task_tests/forward_autograd_test.cc @@ -27,21 +27,18 @@ #include "paddle/pten/core/dense_tensor.h" #include "paddle/pten/core/tensor_meta.h" -// TODO(jiabin): remove nolint here!!! -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { TEST(Forward, SingleNode) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor t = CreateTensorWithValue( + egr::EagerTensor t = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(t)); @@ -55,7 +52,7 @@ TEST(Forward, SingleNode) { tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output - CompareTensorWithValue(out, 13.0); + eager_test::CompareTensorWithValue(out, 13.0); // Examine GradNode { @@ -80,14 +77,14 @@ Node1 out */ TEST(Forward, LinearNodes) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor t = CreateTensorWithValue( + egr::EagerTensor t = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(t)); @@ -108,10 +105,10 @@ TEST(Forward, LinearNodes) { out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output 0 - CompareTensorWithValue(out0, 13.0); + eager_test::CompareTensorWithValue(out0, 13.0); // Examine Forward Output 1 - CompareTensorWithValue(out1, 75.0); + eager_test::CompareTensorWithValue(out1, 75.0); // Examine GradNode { @@ -156,14 +153,14 @@ TEST(Forward, LinearNodes) { out1 out2 */ TEST(Forward, BranchedNodes) { - InitEnv(paddle::platform::CPUPlace()); + 
eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor t = CreateTensorWithValue( + egr::EagerTensor t = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(t)); @@ -190,13 +187,13 @@ TEST(Forward, BranchedNodes) { out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output 0 - CompareTensorWithValue(out0, 13.0); + eager_test::CompareTensorWithValue(out0, 13.0); // Examine Forward Output 1 - CompareTensorWithValue(out1, 75.0); + eager_test::CompareTensorWithValue(out1, 75.0); // Examine Forward Output 2 - CompareTensorWithValue(out2, 150.0); + eager_test::CompareTensorWithValue(out2, 150.0); // Examine GradNode { @@ -248,4 +245,4 @@ TEST(Forward, BranchedNodes) { } } -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc index 751e95487659c..e292844c8ee58 100644 --- a/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc +++ b/paddle/fluid/eager/tests/task_tests/fwd_bwd_joint_test.cc @@ -29,10 +29,7 @@ #include "paddle/fluid/eager/tests/test_utils.h" -// TODO(jiabin): remove nolint here!!! -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { egr::EagerTensor hook_function(const egr::EagerTensor& t) { auto t_dense = std::dynamic_pointer_cast(t.impl()); @@ -61,14 +58,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) { } TEST(FwdBwdJoint, SingleNode) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); // 3. Run Forward float scale = 2.0; @@ -77,7 +74,7 @@ TEST(FwdBwdJoint, SingleNode) { tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output - CompareTensorWithValue(out, 13.0); + eager_test::CompareTensorWithValue(out, 13.0); std::vector outs = {out}; // 4. Run Backward @@ -88,7 +85,7 @@ TEST(FwdBwdJoint, SingleNode) { EagerUtils::unsafe_autograd_meta(tensor)->Grad().impl()) ->data()[0]; // Examine Backward Grad - CompareGradTensorWithValue(tensor, 2.0); + eager_test::CompareGradTensorWithValue(tensor, 2.0); } /* @@ -101,14 +98,14 @@ Node1 out */ TEST(FwdBwdJoint, LinearNodes) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); // 3. 
Run Forward // Run Forward Node 0 @@ -125,17 +122,17 @@ TEST(FwdBwdJoint, LinearNodes) { out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output 0 - CompareTensorWithValue(out0, 13.0); + eager_test::CompareTensorWithValue(out0, 13.0); // Examine Forward Output 1 - CompareTensorWithValue(out1, 75.0); + eager_test::CompareTensorWithValue(out1, 75.0); std::vector outs = {out1}; // 4. Run Backward RunBackward(outs, {}); // Examine Backward Grad - CompareGradTensorWithValue(tensor, 10.0); + eager_test::CompareGradTensorWithValue(tensor, 10.0); } /* @@ -149,14 +146,14 @@ TEST(FwdBwdJoint, LinearNodes) { out1 out2 */ TEST(FwdBwdJoint, BranchedNodes) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); // 3. Run Forward // Run Forward Node 0 @@ -179,10 +176,10 @@ TEST(FwdBwdJoint, BranchedNodes) { out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output 0 - CompareTensorWithValue(out0, 13.0); + eager_test::CompareTensorWithValue(out0, 13.0); // Examine Forward Output 1 - CompareTensorWithValue(out1, 75.0); + eager_test::CompareTensorWithValue(out1, 75.0); // Examine Forward Output 2 { @@ -201,7 +198,7 @@ TEST(FwdBwdJoint, BranchedNodes) { RunBackward(outs, {}); // Examine Backward Grad - CompareGradTensorWithValue(tensor, 30.0); + eager_test::CompareGradTensorWithValue(tensor, 30.0); } /* @@ -215,14 +212,14 @@ TEST(FwdBwdJoint, BranchedNodes) { out1 out2 */ TEST(FwdBwdJoint, GradientHook) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // 1. 
Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); std::function hook = &hook_function; @@ -234,24 +231,24 @@ TEST(FwdBwdJoint, GradientHook) { egr::EagerTensor out0 = egr::scale(tensor, scale0, bias0, true /*bias_after_scale*/, true /*trace_backward*/); - RetainGradForTensor(out0); // hook: +5 - RegisterGradientHookForTensor(out0, hook); // hook: +5 + egr_utils_api::RetainGradForTensor(out0); // hook: +5 + egr_utils_api::RegisterGradientHookForTensor(out0, hook); // hook: +5 // Run Forward Node 1 float scale1 = 5.0; float bias1 = 10.0; egr::EagerTensor out1 = egr::scale( out0, scale1, bias1, true /*bias_after_scale*/, true /*trace_backward*/); - RetainGradForTensor(out1); // hook: +5 - RegisterGradientHookForTensor(out1, hook); // hook: +5 + egr_utils_api::RetainGradForTensor(out1); // hook: +5 + egr_utils_api::RegisterGradientHookForTensor(out1, hook); // hook: +5 // Run Forward Node 2 float scale2 = 10.0; float bias2 = 20.0; egr::EagerTensor out2 = egr::scale( out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/); - RetainGradForTensor(out2); // hook: +5 - RegisterGradientHookForTensor(out2, hook); // hook: +5 + egr_utils_api::RetainGradForTensor(out2); // hook: +5 + egr_utils_api::RegisterGradientHookForTensor(out2, hook); // hook: +5 // 4. Run Backward std::vector outs = {out1, out2}; @@ -259,16 +256,16 @@ TEST(FwdBwdJoint, GradientHook) { // Examine Backward Grad // leaf grad - CompareGradTensorWithValue(tensor, 190.0); + eager_test::CompareGradTensorWithValue(tensor, 190.0); // out0 grad - CompareGradTensorWithValue(out0, 90.0); + eager_test::CompareGradTensorWithValue(out0, 90.0); // out1 grad - CompareGradTensorWithValue(out1, 1.0); + eager_test::CompareGradTensorWithValue(out1, 1.0); // out2 grad - CompareGradTensorWithValue(out2, 1.0); + eager_test::CompareGradTensorWithValue(out2, 1.0); } /* @@ -282,14 +279,14 @@ TEST(FwdBwdJoint, GradientHook) { out1 out2 */ TEST(FwdBwdJoint, CrossBatchAccumulation) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); // 3. 
Run Forward // Run Forward Node 0 @@ -316,13 +313,13 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) { RunBackward(outs, {}); // Examine Backward Grad - CompareGradTensorWithValue(tensor, 30.0); + eager_test::CompareGradTensorWithValue(tensor, 30.0); // Cross Batch Accumulation RunBackward(outs, {}); // Examine Backward Grad - CompareGradTensorWithValue(tensor, 60.0); + eager_test::CompareGradTensorWithValue(tensor, 60.0); } /* ---------------------------------------------------- */ @@ -331,14 +328,14 @@ TEST(FwdBwdJoint, CrossBatchAccumulation) { #if defined(PADDLE_WITH_CUDA) || defined(PADDLE_WITH_HIP) TEST(FwdBwdJoint, SingleNodeCUDA) { - InitEnv(paddle::platform::CUDAPlace()); + eager_test::InitEnv(paddle::platform::CUDAPlace()); // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); // 3. Run Forward float scale = 2.0; @@ -347,14 +344,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) { tensor, scale, bias, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output - CompareTensorWithValue(out, 13.0); + eager_test::CompareTensorWithValue(out, 13.0); std::vector outs = {out}; // 4. Run Backward RunBackward(outs, {}); // Examine Backward Grad - CompareGradTensorWithValue(tensor, 2.0); + eager_test::CompareGradTensorWithValue(tensor, 2.0); } /* @@ -368,14 +365,14 @@ TEST(FwdBwdJoint, SingleNodeCUDA) { out1 out2 */ TEST(FwdBwdJoint, BranchedNodesCUDA) { - InitEnv(paddle::platform::CUDAPlace()); + eager_test::InitEnv(paddle::platform::CUDAPlace()); // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CUDAPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); // 3. Run Forward // Run Forward Node 0 @@ -398,11 +395,11 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) { out0, scale2, bias2, true /*bias_after_scale*/, true /*trace_backward*/); // Examine Forward Output 0 - CompareTensorWithValue(out0, 13.0); + eager_test::CompareTensorWithValue(out0, 13.0); // Examine Forward Output 1 - CompareTensorWithValue(out1, 75.0); + eager_test::CompareTensorWithValue(out1, 75.0); // Examine Forward Output 2 - CompareTensorWithValue(out2, 150.0); + eager_test::CompareTensorWithValue(out2, 150.0); // TODO(jiabin): fix this with add functor // 4. 
Run Backward @@ -410,8 +407,8 @@ TEST(FwdBwdJoint, BranchedNodesCUDA) { RunBackward(outs, {}); // Examine Backward Grad - CompareGradTensorWithValue(tensor, 30.0); + eager_test::CompareGradTensorWithValue(tensor, 30.0); } #endif -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/generated_test.cc b/paddle/fluid/eager/tests/task_tests/generated_test.cc index eb8d1e517eaf3..9d6e331067834 100644 --- a/paddle/fluid/eager/tests/task_tests/generated_test.cc +++ b/paddle/fluid/eager/tests/task_tests/generated_test.cc @@ -30,66 +30,63 @@ #include "paddle/fluid/eager/api/generated/fluid_generated/dygraph_forward_api.h" #include "paddle/pten/core/kernel_registry.h" -// TODO(jiabin): remove nolint here!!! -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { TEST(Generated, Sigmoid) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); VLOG(6) << "Init Env"; // 1. Prepare Input paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4}); VLOG(6) << "Make Dim"; - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 0.0, true); VLOG(6) << "Make EagerTensor"; - RetainGradForTensor(tensor); + egr_utils_api::RetainGradForTensor(tensor); VLOG(6) << "Retain Grad for Tensor"; auto output_tensor = sigmoid_dygraph_function(tensor, {}); VLOG(6) << "Run Backward"; - CompareVariableWithValue(output_tensor, 0.5); + eager_test::CompareVariableWithValue(output_tensor, 0.5); std::vector target_tensors = {output_tensor}; VLOG(6) << "Runing Backward"; RunBackward(target_tensors, {}); VLOG(6) << "Finish Backward"; - CompareGradVariableWithValue(tensor, 0.25); + eager_test::CompareGradVariableWithValue(tensor, 0.25); } TEST(Generated, Matmul_v2) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); auto tracer = std::make_shared(); paddle::imperative::SetCurrentTracer(tracer); // 1. 
Prepare Input paddle::framework::DDim ddimX = paddle::framework::make_ddim({4, 16}); - egr::EagerTensor X = CreateTensorWithValue( + egr::EagerTensor X = egr_utils_api::CreateTensorWithValue( ddimX, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 3.0, true); - RetainGradForTensor(X); + egr_utils_api::RetainGradForTensor(X); paddle::framework::DDim ddimY = paddle::framework::make_ddim({16, 20}); - egr::EagerTensor Y = CreateTensorWithValue( + egr::EagerTensor Y = egr_utils_api::CreateTensorWithValue( ddimY, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 2.0, true); - RetainGradForTensor(Y); + egr_utils_api::RetainGradForTensor(Y); auto output_tensor = matmul_v2_dygraph_function( X, Y, {{"trans_x", false}, {"trans_y", false}}); - CompareVariableWithValue(output_tensor, 96); + eager_test::CompareVariableWithValue(output_tensor, 96); std::vector target_tensors = {output_tensor}; RunBackward(target_tensors, {}); - CompareGradVariableWithValue(X, 2.0 * 20); - CompareGradVariableWithValue(Y, 3.0 * 4); + eager_test::CompareGradVariableWithValue(X, 2.0 * 20); + eager_test::CompareGradVariableWithValue(Y, 3.0 * 4); } -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/hook_test.cc b/paddle/fluid/eager/tests/task_tests/hook_test.cc index 326240d0cb7b9..32b28d8efd21b 100644 --- a/paddle/fluid/eager/tests/task_tests/hook_test.cc +++ b/paddle/fluid/eager/tests/task_tests/hook_test.cc @@ -30,9 +30,7 @@ #include "paddle/fluid/eager/tests/test_utils.h" -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { egr::EagerTensor hook_function(const egr::EagerTensor& t) { auto t_dense = std::dynamic_pointer_cast(t.impl()); @@ -61,14 +59,14 @@ egr::EagerTensor hook_function(const egr::EagerTensor& t) { } TEST(RetainGrad, HookBeforeRetainGrad) { - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(tensor)); @@ -99,8 +97,9 @@ TEST(RetainGrad, HookBeforeRetainGrad) { std::dynamic_pointer_cast( auto_grad_meta)); - RegisterGradientHookForTensor(target_tensor, hook); - RetainGradForTensor(target_tensor); // result: 1.0 + 3.0 = 4.0 + egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook); + egr_utils_api::RetainGradForTensor( + target_tensor); // result: 1.0 + 3.0 = 4.0 } // Connect ScaleNode -> AccumulationNode via Edge @@ -126,25 +125,26 @@ TEST(RetainGrad, HookBeforeRetainGrad) { std::dynamic_pointer_cast( auto_grad_meta)); - RegisterGradientHookForTensor(leaf_tensor, hook); - RetainGradForTensor(leaf_tensor); // result: 4.0*5.0 + 3.0 = 23.0 + egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook); + egr_utils_api::RetainGradForTensor( + leaf_tensor); // result: 4.0*5.0 + 3.0 = 23.0 } RunBackward(target_tensors, {}); - CompareGradTensorWithValue(target_tensor, 4.0); - CompareGradTensorWithValue(leaf_tensor, 23.0); + eager_test::CompareGradTensorWithValue(target_tensor, 4.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 23.0); } TEST(RetainGrad, HookAfterRetainGrad) { - InitEnv(paddle::platform::CPUPlace()); + 
eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor tensor = CreateTensorWithValue( + egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); target_tensors.emplace_back(std::move(tensor)); @@ -173,8 +173,8 @@ TEST(RetainGrad, HookAfterRetainGrad) { std::dynamic_pointer_cast( auto_grad_meta)); - RetainGradForTensor(target_tensor); // result: 1.0 - RegisterGradientHookForTensor(target_tensor, hook); + egr_utils_api::RetainGradForTensor(target_tensor); // result: 1.0 + egr_utils_api::RegisterGradientHookForTensor(target_tensor, hook); } // Connect ScaleNode -> AccumulationNode via Edge @@ -200,15 +200,15 @@ TEST(RetainGrad, HookAfterRetainGrad) { std::dynamic_pointer_cast( auto_grad_meta)); - RetainGradForTensor(leaf_tensor); // RetainGrad for leaf tensor gets - // postponed, result: 4.0*5.0 + 3.0 = - // 23.0 - RegisterGradientHookForTensor(leaf_tensor, hook); + egr_utils_api::RetainGradForTensor( + leaf_tensor); // RetainGrad for leaf tensor gets + // postponed, result: 4.0*5.0 + 3.0 = + // 23.0 + egr_utils_api::RegisterGradientHookForTensor(leaf_tensor, hook); } RunBackward(target_tensors, {}); - CompareGradTensorWithValue(target_tensor, 1.0); - CompareGradTensorWithValue(leaf_tensor, 23.0); + eager_test::CompareGradTensorWithValue(target_tensor, 1.0); + eager_test::CompareGradTensorWithValue(leaf_tensor, 23.0); } - -} // namespace eager_test +} // namespace egr diff --git a/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc b/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc index 5b96c726b2228..5e86cac83a285 100644 --- a/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc +++ b/paddle/fluid/eager/tests/task_tests/tensor_utils_test.cc @@ -23,39 +23,34 @@ #include "paddle/fluid/eager/tests/test_utils.h" #include "paddle/pten/api/lib/utils/allocator.h" -#include "paddle/pten/core/kernel_registry.h" - -// TODO(jiabin): remove nolint here!!! -using namespace egr; // NOLINT - -namespace eager_test { +namespace egr { TEST(TensorUtils, Test) { // Prepare Device Contexts - InitEnv(paddle::platform::CPUPlace()); + eager_test::InitEnv(paddle::platform::CPUPlace()); // Prepare Inputs std::vector target_tensors; paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32}); // Create Target Tensor - egr::EagerTensor t = CreateTensorWithValue( + egr::EagerTensor t = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 5.0 /*value*/, true /*is_leaf*/); - egr::EagerTensor t_grad = CreateTensorWithValue( + egr::EagerTensor t_grad = egr_utils_api::CreateTensorWithValue( ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32, pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/); - CHECK_EQ(IsLeafTensor(t), true); + CHECK_EQ(egr_utils_api::IsLeafTensor(t), true); // Test Utils - CompareTensorWithValue(t, 5.0); + eager_test::CompareTensorWithValue(t, 5.0); egr::AutogradMeta* meta = egr::EagerUtils::autograd_meta(&t); *meta->MutableGrad() = t_grad; - CompareGradTensorWithValue(t, 1.0); + eager_test::CompareGradTensorWithValue(t, 1.0); } -} // namespace eager_test +} // namespace egr
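
Summary of the refactor: patch 1 moves the legacy eager helpers (RunOp, PreparedOp, PrepareData, the AMP auto-cast utilities, the tensor/variable helpers, and the NameTensorMap alias) from the bare `egr` namespace into a nested `egr::legacy` namespace, updating the code templates emitted by eager_generator.cc and the NaN/Inf checker to match. Patch 2 applies the same treatment to the API utilities (`egr::egr_utils_api`) and rewrites the tests to use explicit qualification (`egr_utils_api::...`, `eager_test::...`) in place of the blanket `using namespace egr;` directives that previously needed a NOLINT.

For readers unfamiliar with the call-site impact, below is a minimal standalone C++ sketch of the pattern these patches enforce. `RunOp` here is a toy placeholder with a deliberately simplified signature, not the real `egr::legacy::RunOp` declared in op_runner.h above:

    #include <iostream>
    #include <string>

    namespace egr {
    namespace legacy {
    // Toy stand-in: the real RunOp takes ins/outs tensor maps, an attribute
    // map, a place, default attrs, and an inplace map (see op_runner.h).
    void RunOp(const std::string& type) {
      std::cout << "running op: " << type << "\n";
    }
    }  // namespace legacy
    }  // namespace egr

    int main() {
      // After this refactor, hand-written and generated call sites alike
      // spell out the inner namespace rather than pulling all of `egr`
      // (including its legacy symbols) into scope with a using-directive.
      egr::legacy::RunOp("scale");
      return 0;
    }

Qualifying the nested namespace at each call site keeps the legacy symbols from colliding with their non-legacy successors in `egr`, which is presumably why the NOLINT'd `using namespace egr;` directives in the tests were removed rather than extended.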