Commit

test
risemeup1 committed Sep 17, 2023
2 parents 6056645 + 50669e0 commit f8df0df
Showing 473 changed files with 14,661 additions and 6,512 deletions.
2 changes: 1 addition & 1 deletion .clang-tidy
@@ -156,7 +156,7 @@ cppcoreguidelines-avoid-c-arrays,
 cppcoreguidelines-c-copy-assignment-signature,
 cppcoreguidelines-explicit-virtual-functions,
 -cppcoreguidelines-init-variables,
--cppcoreguidelines-narrowing-conversions,
+cppcoreguidelines-narrowing-conversions,
 -cppcoreguidelines-no-malloc,
 -cppcoreguidelines-pro-type-const-cast,
 -cppcoreguidelines-pro-type-member-init,
9 changes: 7 additions & 2 deletions .flake8
@@ -2,8 +2,6 @@
 select = C,E,W
 exclude =
     ./build,
-    # Exclude fluid directory
-    ./python/paddle/base/**,
     # Exclude third-party libraries
     ./third_party/**,
     ./python/paddle/utils/gast/**,
@@ -27,3 +25,10 @@ ignore =
 per-file-ignores =
     # These files need tabs for testing.
     test/dygraph_to_static/test_error.py:E101,W191
+
+    # temp ignore base directory
+    python/paddle/base/*:
+        E713,
+        E712,
+        E266,
+        E714
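For reference, the temporarily ignored codes are standard pycodestyle checks: E712 flags comparisons like `x == True`, E713 flags `not x in y` (prefer `x not in y`), E714 flags `not x is y` (prefer `x is not y`), and E266 flags block comments that start with `##`. The `# temp ignore` comment signals these are to be cleaned up in the renamed `python/paddle/base` tree rather than suppressed permanently.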
1 change: 1 addition & 0 deletions .gitignore
@@ -97,5 +97,6 @@ python/paddle/incubate/fleet/parameter_server/pslib/ps_pb2.py
 paddle/phi/kernels/fusion/cutlass/conv2d/generated/*
 python/paddle/base/incubate/fleet/parameter_server/pslib/ps_pb2.py
 paddle/fluid/ir_adaptor/translator/op_compat_info.cc
+paddle/phi/kernels/fusion/cutlass/cutlass_kernels/fpA_intB_gemm/autogen/*
 paddle/fluid/pybind/static_op_function.*
 paddle/fluid/pybind/ops_api.cc
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -66,7 +66,7 @@ repos:
       - id: flake8
         args: ["--config=.flake8"]
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.0.272
+    rev: v0.0.289
     hooks:
       - id: ruff
         args: [--fix, --exit-non-zero-on-fix, --no-cache]
22 changes: 2 additions & 20 deletions paddle/cinn/backends/codegen_c.cc
@@ -285,31 +285,13 @@ void CodeGenC::Visit(const ir::Select *op) {
 void CodeGenC::Visit(const ir::IfThenElse *op) {
   str_ += "if (";
   IrPrinter::Visit(op->condition);
-  str_ += ") {\n";
+  str_ += ") ";
 
-  if (!op->true_case.As<ir::Block>()) IncIndent();
-  DoIndent();
   IrPrinter::Visit(op->true_case);
-  if (!op->true_case.As<ir::Block>()) str_ += ";";
-  str_ += "\n";
-
-  if (!op->true_case.As<ir::Block>()) DecIndent();
-
-  DoIndent();
-  str_ += "}";
 
   if (op->false_case.defined()) {
-    str_ += " else {\n";
-
-    if (!op->true_case.As<ir::Block>()) IncIndent();
-    DoIndent();
+    str_ += " else ";
     IrPrinter::Visit(op->false_case);
-    if (!op->false_case.As<ir::Block>()) str_ += ";";
-    str_ += "\n";
-    if (!op->true_case.As<ir::Block>()) DecIndent();
-
-    DoIndent();
-    str_ += "}";
   }
 }
 void CodeGenC::Visit(const ir::Block *op) {
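The printer can shed all of its brace and indent bookkeeping because of the companion change in paddle/cinn/ir/ir.cc below: `IfThenElse::Make` now wraps any non-Block branch in an `ir::Block`, so visiting a branch always reaches the Block visitor, which prints its own braces. `IrPrinter::Visit(const IfThenElse *)` in ir_printer.cc gets the same cleanup further down.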
4 changes: 0 additions & 4 deletions paddle/cinn/backends/ir_schedule_test.cc
@@ -794,10 +794,8 @@ void test_simple_compute_at(void* _args, int32_t num_args)
   for (int32_t i_j_fused_1 = 0; i_j_fused_1 < 2; i_j_fused_1 += 1) {
     for (int32_t i_j_fused_2 = 0; i_j_fused_2 < 1024; i_j_fused_2 += 1) {
       if ((((1024 * i_j_fused_1) + i_j_fused_2) < 1280)) {
-      {
         B[((1024 * i_j_fused_1) + i_j_fused_2)] = A[((1024 * i_j_fused_1) + i_j_fused_2)];
         C[((1024 * i_j_fused_1) + i_j_fused_2)] = B[((1024 * i_j_fused_1) + i_j_fused_2)];
-      }
       };
     };
   };
@@ -869,10 +867,8 @@ void test_compute_at0(void* _args, int32_t num_args)
   for (int32_t i_j_fused_1 = 0; i_j_fused_1 < 2; i_j_fused_1 += 1) {
     for (int32_t i_j_fused_2 = 0; i_j_fused_2 < 1024; i_j_fused_2 += 1) {
       if ((((1024 * i_j_fused_1) + i_j_fused_2) < 1280)) {
-      {
         B[((1024 * i_j_fused_1) + i_j_fused_2)] = A[((1024 * i_j_fused_1) + i_j_fused_2)];
         C[((1024 * i_j_fused_1) + i_j_fused_2)] = B[((1024 * i_j_fused_1) + i_j_fused_2)];
-      }
       };
     };
   };
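The expected codegen strings lose one level of nesting: since branch bodies are now normalized to a single `ir::Block`, the `if` printer no longer wraps an already-braced block in a second pair of braces.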
4 changes: 2 additions & 2 deletions paddle/cinn/backends/llvm/codegen_x86.cc
@@ -28,7 +28,7 @@
 #include "paddle/cinn/common/target.h"
 #include "paddle/cinn/ir/ir.h"
 #include "paddle/cinn/ir/op/ir_operators.h"
-#include "paddle/cinn/optim/collect_undefined_vars.h"
+#include "paddle/cinn/ir/utils/ir_nodes_collector.h"
 #include "paddle/cinn/runtime/intrinsic.h"
 
 namespace cinn::backends {
@@ -98,7 +98,7 @@ void CodeGenX86::CreateParallelLaunch(Expr body, int num_task) {
                                       llvm::Function::PrivateLinkage,
                                       "__parallel_lambda",
                                       m_);
-  std::vector<std::string> vars = optim::CollectUndefinedVars(&body);
+  std::vector<std::string> vars = ir::CollectUndefinedVars(&body);
   uint64_t nbytes;
   auto* data = PackVars(vars, &nbytes);
 
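Only the namespace changes at this call site: `CollectUndefinedVars` moves out of `paddle/cinn/optim` (whose `collect_undefined_vars.cc` is removed from the optim CMakeLists at the end of this diff) into `paddle/cinn/ir/utils/ir_nodes_collector.{h,cc}`, added below.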
8 changes: 3 additions & 5 deletions paddle/cinn/hlir/framework/new_ir/op_lowering_impl.cc
@@ -53,9 +53,7 @@ std::vector<ir::Tensor> CollectInputTensor(
     std::vector<ir::Tensor>* func_args,
     std::unordered_map<::pir::Value, ir::Tensor>* tensor_map) {
   std::vector<ir::Tensor> tensors;
-  for (auto& operand : op->operands()) {
-    CHECK(operand);
-    auto in_value = operand.source();
+  for (auto in_value : op->operands_source()) {
     VLOG(4) << "input tensor name: " << CompatibleInfo::ValueName(in_value);
     // NOTE(Aurelius84): Need always to create placeholder for input tensor.
     ir::Tensor tensor = details::GetTensor(in_value);
@@ -72,7 +70,7 @@ std::vector<ir::Tensor> CollectInputTensor(
   return tensors;
 }
 
-void CollectOutputInfo(const ::pir::Operation* op,
+void CollectOutputInfo(::pir::Operation* op,
                        std::vector<Type>* out_types,
                        std::vector<std::vector<int>>* out_shapes) {
   auto op_results = op->results();
@@ -359,7 +357,7 @@ std::vector<ir::Expr> OpLowererImpl::LowerOps(
 
 std::vector<ir::LoweredFunc> OpLowererImpl::DoOpLower(
     std::shared_ptr<hlir::framework::OpImpl> op_impl,
-    const ::pir::Operation* op,
+    ::pir::Operation* op,
     std::unordered_map<::pir::Value, ir::Tensor>* tensor_map,
     std::vector<ir::Tensor>* op_func_arg_tensors) {
   VLOG(4) << "Do lower with Compute, op: " << op->name();
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/new_ir/op_lowering_impl.h
@@ -131,7 +131,7 @@ class OpLowererImpl : public OpLowererImplBase<GroupPtr> {
    */
   std::vector<ir::LoweredFunc> DoOpLower(
       std::shared_ptr<hlir::framework::OpImpl> op_impl,
-      const ::pir::Operation* op,
+      ::pir::Operation* op,
       std::unordered_map<::pir::Value, ir::Tensor>* tensor_map,
       std::vector<ir::Tensor>* op_func_arg_tensors);
 
3 changes: 1 addition & 2 deletions paddle/cinn/hlir/framework/new_ir/utils.cc
@@ -74,8 +74,7 @@ std::vector<std::string> CompatibleInfo::InputNames(const ::pir::Operation& op,
   return names;
 }
 
-std::vector<std::string> CompatibleInfo::OutputNames(
-    const ::pir::Operation& op) {
+std::vector<std::string> CompatibleInfo::OutputNames(::pir::Operation& op) {
   std::vector<std::string> names;
   for (int i = 0; i < op.num_results(); ++i) {
     auto value = op.result(i);
2 changes: 1 addition & 1 deletion paddle/cinn/hlir/framework/new_ir/utils.h
@@ -40,7 +40,7 @@ struct CompatibleInfo {
   static std::vector<std::string> InputNames(const ::pir::Operation& op,
                                              bool allow_duplicate = false);
 
-  static std::vector<std::string> OutputNames(const ::pir::Operation& op);
+  static std::vector<std::string> OutputNames(::pir::Operation& op);  // NOLINT
 };
 
 }  // namespace newir
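A guess at the motivation, since the commit message doesn't say: the pir accessors these helpers now use (e.g. `operands_source()`) appear to be non-const member functions, forcing the `const` off the `::pir::Operation` parameters here and in op_lowering_impl above; the `// NOLINT` then silences clang-tidy's non-const-reference warning.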
8 changes: 6 additions & 2 deletions paddle/cinn/ir/ir.cc
@@ -257,7 +257,7 @@ Expr For::Make(Var loop_var,
   node->min = min;
   node->extent = extent;
   node->device_api = device_api;
-  node->body = body;
+  node->body = body.As<ir::Block>() ? body : ir::Block::Make({body});
   node->set_for_type(for_type);
   node->set_vectorize_info(vector_info);
   node->set_bind_info(bind_info);
@@ -346,6 +346,10 @@ std::vector<const Expr *> ScheduleBlockRealize::expr_fields() const {
 }
 
 Expr IfThenElse::Make(Expr condition, Expr true_case, Expr false_case) {
+  if (true_case.defined() && (!true_case.As<Block>()))
+    true_case = ir::Block::Make({true_case});
+  if (false_case.defined() && (!false_case.As<Block>()))
+    false_case = ir::Block::Make({false_case});
   auto node = make_shared<IfThenElse>(condition, true_case, false_case);
   return Expr(node);
 }
@@ -513,7 +517,7 @@ Expr PolyFor::Make(Var iterator,
   n->condition = condition;
   n->inc = inc;
   n->device_api = device_api;
-  n->body = body;
+  n->body = body.As<ir::Block>() ? body : ir::Block::Make({body});
   n->set_for_type(for_type);
   n->set_vectorize_info(vectorize_info);
   n->set_bind_info(bind_info);
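Taken together with the two printer changes, this establishes the invariant the printers rely on: after construction, a `For`/`PolyFor` body and both branches of an `IfThenElse` are always `ir::Block` nodes, so brace printing lives in exactly one visitor. A self-contained sketch of the pattern; every type and name below is a hypothetical stand-in for illustration, not CINN's real API:

// normalize_to_block.cc -- toy model of "wrap at construction, print braces once".
#include <iostream>
#include <memory>
#include <string>
#include <utility>
#include <vector>

struct Node {
  virtual ~Node() = default;
  virtual std::string Print() const = 0;
};
using NodePtr = std::shared_ptr<Node>;

// A single statement, e.g. a store.
struct Stmt : Node {
  std::string text;
  explicit Stmt(std::string t) : text(std::move(t)) {}
  std::string Print() const override { return text + ";"; }
};

// A braced statement list; the only place braces are printed.
struct Block : Node {
  std::vector<NodePtr> stmts;
  explicit Block(std::vector<NodePtr> s) : stmts(std::move(s)) {}
  std::string Print() const override {
    std::string out = "{\n";
    for (const auto& s : stmts) out += "  " + s->Print() + "\n";
    return out + "}";
  }
};

struct IfThenElse : Node {
  std::string cond;
  NodePtr true_case, false_case;
  // Like IfThenElse::Make above: wrap non-Block branches at construction.
  static NodePtr Make(std::string cond, NodePtr t, NodePtr f) {
    auto wrap = [](NodePtr n) -> NodePtr {
      if (n && !std::dynamic_pointer_cast<Block>(n))
        return std::make_shared<Block>(std::vector<NodePtr>{std::move(n)});
      return n;
    };
    auto node = std::make_shared<IfThenElse>();
    node->cond = std::move(cond);
    node->true_case = wrap(std::move(t));
    node->false_case = wrap(std::move(f));
    return node;
  }
  // Like the simplified CodeGenC/IrPrinter visitors: no brace handling here.
  std::string Print() const override {
    std::string out = "if (" + cond + ") " + true_case->Print();
    if (false_case) out += " else " + false_case->Print();
    return out;
  }
};

int main() {
  auto e = IfThenElse::Make("x < 1280",
                            std::make_shared<Stmt>("B[x] = A[x]"),
                            nullptr);
  std::cout << e->Print() << "\n";
  // Prints:
  // if (x < 1280) {
  //   B[x] = A[x];
  // }
}

Normalizing at construction means every consumer (C codegen, IR printing, schedules) sees one canonical shape, at the cost of an extra Block node around single-statement bodies — which is why the expected strings in ir_schedule_test.cc above shrink.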
86 changes: 86 additions & 0 deletions paddle/cinn/ir/utils/ir_nodes_collector.cc
@@ -207,6 +207,92 @@ std::set<Expr> CollectReferencedTensors(
   return ts0;
 }
 
+std::vector<std::string> CollectUndefinedVars(const Expr* e) {
+  struct Mutator : public ir::IRMutator<const Expr*> {
+    using ir::IRMutator<const Expr*>::Visit;
+    std::vector<std::string> undefined_vars;
+    std::set<std::string> defined_vars;
+    std::set<std::string> used_vars;
+
+    void CollectVarDef(const std::string& var) {
+      CHECK(!defined_vars.count(var))
+          << "var " << var << " has been defined, please check";
+      CHECK(!used_vars.count(var))
+          << "var " << var << " is wrongly used before definition";
+      defined_vars.insert(var);
+    }
+
+    void ClearVar(const std::string& var) {
+      defined_vars.erase(var);
+      used_vars.erase(var);
+    }
+
+    void CollectVarUse(const std::string& var) {
+      used_vars.insert(var);
+      if (defined_vars.count(var) == 0) {
+        undefined_vars.push_back(var);
+      }
+    }
+
+    void Visit(const ir::Let* op, const Expr* expr) override {
+      Expr symbol = op->symbol;
+      auto var = symbol.as_var_ref();
+      CHECK(var.defined());
+      CollectVarDef(var->name);
+      auto* node = expr->As<ir::Let>();
+      Visit(&node->body, &node->body);
+    }
+
+    void Visit(const ir::For* op, const Expr* expr) override {
+      CollectVarDef(op->loop_var->name);
+      auto* node = expr->As<ir::For>();
+      Visit(&node->min, &node->min);
+      Visit(&node->extent, &node->extent);
+      Visit(&node->body, &node->body);
+      ClearVar(op->loop_var->name);
+    }
+
+    void Visit(const ir::Load* op, const Expr* expr) override {
+      auto tensor = op->tensor.as_tensor_ref();
+      CollectVarUse(tensor->name);
+      auto* node = expr->As<ir::Load>();
+      for (auto& idx : node->indices) Visit(&idx, &idx);
+    }
+
+    void Visit(const ir::Store* op, const Expr* expr) override {
+      auto tensor = op->tensor.as_tensor_ref();
+      CollectVarUse(tensor->name);
+      auto* node = expr->As<ir::Store>();
+      for (auto& idx : node->indices) Visit(&idx, &idx);
+      Visit(&node->value, &node->value);
+    }
+
+    void Visit(const ir::_Var_* op, const Expr* expr) override {
+      CollectVarUse(op->name);
+      auto* node = expr->As<ir::_Var_>();
+      if (node->lower_bound.defined()) {
+        Visit(&node->lower_bound, &node->lower_bound);
+      }
+      if (node->upper_bound.defined()) {
+        Visit(&node->upper_bound, &node->upper_bound);
+      }
+    }
+
+    void Visit(const ir::Reduce* op, const Expr* expr) override {
+      for (auto& axis : op->reduce_axis) {
+        CollectVarDef(axis->name);
+      }
+      auto* node = expr->As<ir::Reduce>();
+      if (node->init.defined()) Visit(&node->init, &node->init);
+      Visit(&node->body, &node->body);
+    }
+  };
+
+  Mutator mutator;
+  mutator.Visit(e, e);
+  return mutator.undefined_vars;
+}
+
 std::set<std::string> CollectTensorNeedsWrite(const Expr* e) {
   std::set<std::string> tensor_written;
   IrNodesCollector::handler_t handler = [&](const Expr* x) {
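The scoping logic: `For` and `Reduce` define their loop/axis variables (`For` un-defines on exit via `ClearVar`), `Let` defines its symbol, and `Load`/`Store`/`_Var_` record uses; any use with no definition in scope is appended to `undefined_vars` in use order. A minimal standalone sketch of that define/use bookkeeping, using a toy IR with hypothetical names rather than CINN's types:

// collect_undefined_sketch.cc -- toy version of the define/use walk above.
#include <iostream>
#include <memory>
#include <set>
#include <string>
#include <vector>

struct Node { virtual ~Node() = default; };
using NodePtr = std::shared_ptr<Node>;

struct Use : Node { std::string name; };  // stands in for Load/Store/_Var_
struct For : Node { std::string loop_var; std::vector<NodePtr> body; };

struct Collector {
  std::set<std::string> defined, used;
  std::vector<std::string> undefined;  // reported in use order

  void Visit(const NodePtr& n) {
    if (auto* u = dynamic_cast<Use*>(n.get())) {
      used.insert(u->name);
      if (!defined.count(u->name)) undefined.push_back(u->name);
    } else if (auto* f = dynamic_cast<For*>(n.get())) {
      defined.insert(f->loop_var);   // CollectVarDef
      for (const auto& s : f->body) Visit(s);
      defined.erase(f->loop_var);    // ClearVar: the loop var's scope ends here
      used.erase(f->loop_var);
    }
  }
};

int main() {
  // for i { for j { use a; use b; } } -- the example from the header below
  auto a = std::make_shared<Use>(); a->name = "a";
  auto b = std::make_shared<Use>(); b->name = "b";
  auto inner = std::make_shared<For>();
  inner->loop_var = "j";
  inner->body = {a, b};
  auto outer = std::make_shared<For>();
  outer->loop_var = "i";
  outer->body = {inner};

  Collector c;
  c.Visit(outer);
  for (const auto& v : c.undefined) std::cout << v << "\n";  // prints: a b
}

The real mutator additionally CHECK-fails on a duplicate definition or a use-before-definition of the same variable, and treats tensor names reached through Load/Store as uses.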
17 changes: 17 additions & 0 deletions paddle/cinn/ir/utils/ir_nodes_collector.h
File mode changed from 100755 to 100644.
@@ -65,6 +65,23 @@ std::map<std::string, Expr> CollectTensorMap(
       return true;
     });
 
+/**
+ * Collect undefined vars in the scope.
+ *
+ * e.g.
+ *
+ * The expression:
+ *   for i
+ *     for j
+ *       a[i, j] = b[i, j]
+ *
+ * here a, b are vars without definition
+ */
+std::vector<std::string> CollectUndefinedVars(const Expr* e);
+
+/**
+ * Collect the Tensor Nodes which will be written to by Store or Call Nodes.
+ */
 std::set<std::string> CollectTensorNeedsWrite(const Expr* e);
 
 }  // namespace ir
18 changes: 2 additions & 16 deletions paddle/cinn/ir/utils/ir_printer.cc
@@ -229,26 +229,12 @@ void IrPrinter::Visit(const PolyFor *x) {
 void IrPrinter::Visit(const IfThenElse *x) {
   str_ += "if (";
   Visit(x->condition);
-  str_ += ") {\n";
-  IncIndent();
-  DoIndent();
+  str_ += ") ";
   Visit(x->true_case);
-  DecIndent();
-  str_ += "\n";
-  DoIndent();
-  str_ += "}";
 
   if (x->false_case.defined()) {
-    str_ += " else {\n";
-    IncIndent();
-
-    DoIndent();
+    str_ += " else ";
     Visit(x->false_case);
-    str_ += "\n";
-
-    DecIndent();
-    DoIndent();
-    str_ += "}";
   }
 }
 void IrPrinter::Visit(const Block *x) {
1 change: 0 additions & 1 deletion paddle/cinn/optim/CMakeLists.txt
@@ -23,7 +23,6 @@ gather_srcs(
   replace_const_param_to_integer.cc
   lower_intrin.cc
   cast_bool_to_int8.cc
-  collect_undefined_vars.cc
   var_mod_simplify.cc
   remove_schedule_block.cc)
 