Eager dygraph egr_utils_api namespace refactor #37654

Merged
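This PR moves the eager-mode utility APIs out of the top-level egr namespace into a nested egr::egr_utils_api namespace, and updates every call site accordingly. A minimal self-contained sketch of the pattern (EagerTensor and the function body here are illustrative stubs, not the real Paddle definitions):

namespace egr {
struct EagerTensor {};  // stub; the real type lives under paddle/fluid/eager

namespace egr_utils_api {
// Utility entry points such as RetainGradForTensor now sit one level down.
inline void RetainGradForTensor(const EagerTensor& /*tensor*/) {}
}  // namespace egr_utils_api
}  // namespace egr

int main() {
  egr::EagerTensor t;
  // Callers either qualify the nested namespace in full...
  egr::egr_utils_api::RetainGradForTensor(t);
  // ...or pull it in with a using-directive, as the benchmark files below do.
  using namespace egr::egr_utils_api;  // NOLINT
  RetainGradForTensor(t);
  return 0;
}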
2 changes: 2 additions & 0 deletions paddle/fluid/eager/api/utils/hook_utils.cc
@@ -20,6 +20,7 @@
#include "paddle/pten/core/dense_tensor.h"

namespace egr {
+namespace egr_utils_api {

void RegisterGradientHookForTensor(
const egr::EagerTensor& tensor,
@@ -90,4 +91,5 @@ void RetainGradForTensor(const egr::EagerTensor& tensor) {
}
}

+}  // namespace egr_utils_api
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/api/utils/hook_utils.h
@@ -18,6 +18,7 @@
#include "paddle/fluid/eager/grad_node_info.h"
#include "paddle/pten/api/all.h"
namespace egr {
+namespace egr_utils_api {

void RegisterGradientHookForTensor(
const egr::EagerTensor& tensor,
@@ -27,4 +28,5 @@ void RegisterReduceHookForTensor(const egr::EagerTensor& tensor,
const std::function<void(void)>& hook);
void RetainGradForTensor(const egr::EagerTensor& tensor);

+}  // namespace egr_utils_api
} // namespace egr
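For orientation, a hypothetical call site against the relocated hook API (this snippet is not part of the PR and assumes only the signatures declared in hook_utils.h above):

#include <functional>

#include "paddle/fluid/eager/api/utils/hook_utils.h"

// Hypothetical helper: registers a reduce hook and retains the gradient of a
// tensor through the new egr::egr_utils_api namespace.
void WireHooks(const egr::EagerTensor& tensor) {
  egr::egr_utils_api::RegisterReduceHookForTensor(
      tensor, []() { /* runs when the tensor's gradient is reduced */ });
  egr::egr_utils_api::RetainGradForTensor(tensor);
}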
2 changes: 2 additions & 0 deletions paddle/fluid/eager/api/utils/tensor_utils.cc
@@ -26,6 +26,7 @@
#include "paddle/fluid/framework/variable.h"

namespace egr {
+namespace egr_utils_api {

bool IsLeafTensor(const egr::EagerTensor& target) {
std::shared_ptr<GradNodeBase> grad_node = EagerUtils::grad_node(target);
@@ -58,4 +59,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
return out;
}

+}  // namespace egr_utils_api
} // namespace egr
2 changes: 2 additions & 0 deletions paddle/fluid/eager/api/utils/tensor_utils.h
@@ -18,6 +18,7 @@
#include "paddle/pten/api/all.h"

namespace egr {
+namespace egr_utils_api {

// If and only if the tensor holds an AccumulationNode
// Then it's treated as a leaf tensor
@@ -29,4 +30,5 @@ egr::EagerTensor CreateTensorWithValue(const pten::DDim& ddim,
const pten::DataLayout& layout,
float value, bool is_leaf = true);

+}  // namespace egr_utils_api
} // namespace egr
@@ -32,8 +32,8 @@
#include "gperftools/profiler.h"
#endif

-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;  // NOLINT
+using namespace egr_utils_api;  // NOLINT

// Disable pten path
DECLARE_bool(run_pten_kernel);
@@ -31,8 +31,8 @@
#include "gperftools/profiler.h"
#endif

-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
+using namespace egr;  // NOLINT
+using namespace egr_utils_api;  // NOLINT

DECLARE_bool(run_pten_kernel);

46 changes: 22 additions & 24 deletions paddle/fluid/eager/tests/task_tests/backward_test.cc
@@ -30,19 +30,17 @@
#include "paddle/pten/core/dense_tensor.h"
#include "paddle/pten/core/tensor_meta.h"

-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {

TEST(Backward, SingleNodeEmptyGrad) {
// Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());

// Prepare Inputs
paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});

// Create Target Tensor
-  egr::EagerTensor target_tensor = CreateTensorWithValue(
+  egr::EagerTensor target_tensor = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);

@@ -67,7 +65,7 @@ TEST(Backward, SingleNodeEmptyGrad) {
std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);

-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);

// Connect Node0 -> AccumulationNode via Edge
auto meta = egr::AutogradMeta();
@@ -80,26 +78,26 @@
RunBackward(outs, {});

// Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
}

TEST(Backward, SingleNodeCustomGrad) {
// Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());

// Prepare Inputs
std::vector<egr::EagerTensor> target_tensors;
paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});

// Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
target_tensors.emplace_back(std::move(tensor));

std::vector<egr::EagerTensor> grad_tensors;
// Create Grad Tensor
-  egr::EagerTensor grad_tensor = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
grad_tensors.emplace_back(std::move(grad_tensor));
@@ -128,7 +126,7 @@ TEST(Backward, SingleNodeCustomGrad) {
std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);

-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);

// Connect Node0 -> AccumulationNode via Edge
auto meta = egr::AutogradMeta();
@@ -141,7 +139,7 @@ TEST(Backward, SingleNodeCustomGrad) {
RunBackward(target_tensors, grad_tensors);

// Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
}

/*
@@ -153,14 +151,14 @@ Node0
*/
TEST(Backward, LinearNodes) {
// Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());

// Prepare Inputs
std::vector<egr::EagerTensor> target_tensors;
paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});

// Create Target Tensor
-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
target_tensors.emplace_back(std::move(tensor));
@@ -202,7 +200,7 @@ TEST(Backward, LinearNodes) {
std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);

-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);

// Connect Node1 -> AccumulationNode via Edge
auto meta1 = egr::AutogradMeta();
@@ -215,7 +213,7 @@ TEST(Backward, LinearNodes) {
RunBackward(target_tensors, {});

// Check Output Value
-  CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 50.0);
}

/*
@@ -227,28 +225,28 @@ Node0 Node1
*/
TEST(Backward, WithAccumulation) {
// Prepare Device Contexts
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());

// Prepare Inputs
paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});

// Create Target Tensor
std::vector<egr::EagerTensor> target_tensors;
-  egr::EagerTensor tensor0 = CreateTensorWithValue(
+  egr::EagerTensor tensor0 = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor tensor1 = CreateTensorWithValue(
+  egr::EagerTensor tensor1 = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
target_tensors.emplace_back(std::move(tensor0));
target_tensors.emplace_back(std::move(tensor1));

// Create Grad Tensor
std::vector<egr::EagerTensor> grad_tensors;
-  egr::EagerTensor grad_tensor0 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor0 = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 5.0 /*value*/, false /*is_leaf*/);
-  egr::EagerTensor grad_tensor1 = CreateTensorWithValue(
+  egr::EagerTensor grad_tensor1 = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 10.0 /*value*/, false /*is_leaf*/);
grad_tensors.emplace_back(std::move(grad_tensor0));
@@ -303,7 +301,7 @@ TEST(Backward, WithAccumulation) {
std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
auto_grad_meta2->SetSingleOutRankWithSlot(0, 0);

-  egr::RetainGradForTensor(leaf_tensor);
+  egr_utils_api::RetainGradForTensor(leaf_tensor);

// Connect Node2 -> AccumulationNode via Edge
auto meta2 = egr::AutogradMeta();
@@ -314,7 +312,7 @@

RunBackward(target_tensors, grad_tensors);

-  CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 2500.0);
}

-}  // namespace eager_test
+}  // namespace egr
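The test files above all follow the same migration: the global using namespace egr plus a surrounding eager_test namespace is replaced by placing the tests inside namespace egr itself, with helpers from other namespaces qualified explicitly. A compilable sketch of that pattern, using hypothetical stand-ins (Helper and TestBody are not real Paddle symbols):

#include <cassert>

// Stand-in for helpers such as eager_test::InitEnv or
// eager_test::CompareGradTensorWithValue.
namespace eager_test {
inline int Helper() { return 42; }
}  // namespace eager_test

// Test bodies now live inside egr, so egr symbols need no qualification
// while foreign helpers are spelled out.
namespace egr {
inline int TestBody() { return eager_test::Helper(); }
}  // namespace egr

int main() {
  assert(egr::TestBody() == 42);
  return 0;
}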
@@ -31,17 +31,15 @@

#include "paddle/fluid/eager/tests/test_utils.h"

-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {

TEST(CrossBatchAccumulation, SingleScaleNode) {
-  InitEnv(paddle::platform::CPUPlace());
+  eager_test::InitEnv(paddle::platform::CPUPlace());

std::vector<egr::EagerTensor> target_tensors;
paddle::framework::DDim ddim = paddle::framework::make_ddim({4, 16, 16, 32});

-  egr::EagerTensor tensor = CreateTensorWithValue(
+  egr::EagerTensor tensor = egr_utils_api::CreateTensorWithValue(
ddim, paddle::platform::CPUPlace(), pten::DataType::FLOAT32,
pten::DataLayout::NCHW, 1.0 /*value*/, false /*is_leaf*/);
target_tensors.emplace_back(std::move(tensor));
@@ -60,7 +58,7 @@ TEST(CrossBatchAccumulation, SingleScaleNode) {
auto_grad_meta->SetGradNode(
std::dynamic_pointer_cast<GradNodeBase>(scale_node_ptr));
auto_grad_meta->SetSingleOutRankWithSlot(0, 0);
-  RetainGradForTensor(target_tensor);  // result: 1.0
+  egr_utils_api::RetainGradForTensor(target_tensor);  // result: 1.0

auto meta = AutogradMeta();
meta.SetSingleOutRankWithSlot(0, 0);
@@ -71,18 +69,18 @@
auto_grad_meta1->SetGradNode(
std::dynamic_pointer_cast<GradNodeBase>(acc_node_ptr));
auto_grad_meta1->SetSingleOutRankWithSlot(0, 0);
-    RetainGradForTensor(leaf_tensor);
+    egr_utils_api::RetainGradForTensor(leaf_tensor);
}

RunBackward(target_tensors, {});

-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 5.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 5.0);

RunBackward(target_tensors, {});

-  CompareGradTensorWithValue<float>(target_tensor, 1.0);
-  CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
+  eager_test::CompareGradTensorWithValue<float>(target_tensor, 1.0);
+  eager_test::CompareGradTensorWithValue<float>(leaf_tensor, 10.0);
}

-}  // namespace eager_test
+}  // namespace egr
14 changes: 5 additions & 9 deletions paddle/fluid/eager/tests/task_tests/eager_utils_test.cc
@@ -24,10 +24,7 @@

#include "paddle/pten/api/lib/utils/allocator.h"

-// TODO(jiabin): remove nolint here!!!
-using namespace egr;  // NOLINT
-
-namespace eager_test {
+namespace egr {

TEST(EagerUtils, AutoGradMeta) {
// Construct Eager Tensor
@@ -167,7 +164,7 @@ TEST(EagerUtils, PassStopGradient) {

TEST(EagerUtils, SyncToVarsSingle) {
paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  auto tensor = eager_test::CreateTestCPUTensor(5.0f, ddim);
+  auto tensor = CreateTestCPUTensor(5.0f, ddim);
std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
egr::EagerUtils::SyncToVars(tensor);

@@ -185,9 +182,8 @@

TEST(EagerUtils, SyncToVarsMultiple) {
paddle::framework::DDim ddim = paddle::framework::make_ddim({2, 4, 4, 4});
-  std::vector<egr::EagerTensor> tensors = {
-      eager_test::CreateTestCPUTensor(1.0f, ddim),
-      eager_test::CreateTestCPUTensor(2.0f, ddim)};
+  std::vector<egr::EagerTensor> tensors = {CreateTestCPUTensor(1.0f, ddim),
+                                           CreateTestCPUTensor(2.0f, ddim)};

std::vector<std::shared_ptr<egr::EagerTensor>> var_bases =
egr::EagerUtils::SyncToVars(tensors);
@@ -280,4 +276,4 @@ TEST(EagerUtils, ConstructDuplicableOutput) {
CHECK(outs[0]->initialized() == false);
}

-}  // namespace eager_test
+}  // namespace egr