From 32f56719bac350023c8ae4ccc4f63a8ce10ab1cf Mon Sep 17 00:00:00 2001
From: chenruibiao
Date: Fri, 15 Jul 2022 18:19:44 +0800
Subject: [PATCH 1/2] Rename BOOST_GET macros

---
 .../distributed/fleet_executor/dist_model.cc | 8 +-
 .../forwards/fused_attention_fwd_func.cc | 2 +-
 .../forwards/fused_feedforward_fwd_func.cc | 2 +-
 .../forwards/fused_gate_attention_fwd_func.cc | 4 +-
 .../nodes/fused_attention_node.cc | 2 +-
 .../nodes/fused_feedforward_node.cc | 2 +-
 .../nodes/fused_gate_attention_node.cc | 4 +-
 .../auto_code_generator/eager_generator.cc | 8 +-
 .../eager/to_static/run_program_op_node.h | 20 +--
 paddle/fluid/framework/attribute.cc | 26 +--
 paddle/fluid/framework/attribute.h | 20 +--
 paddle/fluid/framework/block_desc.cc | 5 +-
 .../details/async_ssa_graph_executor.cc | 6 +-
 .../details/fetch_async_op_handle.cc | 4 +-
 .../framework/details/fetch_op_handle.cc | 20 +--
 .../framework/details/multi_devices_helper.h | 6 +-
 .../details/parallel_ssa_graph_executor.cc | 8 +-
 paddle/fluid/framework/executor.cc | 4 +-
 paddle/fluid/framework/feed_fetch_method.cc | 2 +-
 paddle/fluid/framework/grad_op_desc_maker.h | 2 +-
 paddle/fluid/framework/infershape_utils.cc | 86 +++++-----
 paddle/fluid/framework/infershape_utils.h | 6 +-
 .../ir/adaptive_pool2d_convert_global_pass.cc | 8 +-
 ...ptive_pool2d_convert_global_pass_tester.cc | 2 +-
 .../fluid/framework/ir/conv_bn_fuse_pass.cc | 4 +-
 .../ir/cudnn_placement_pass_tester.cc | 2 +-
 .../ir/delete_fill_constant_op_pass.cc | 4 +-
 .../ir/delete_quant_dequant_filter_op_pass.cc | 4 +-
 .../ir/delete_quant_dequant_linear_op_pass.cc | 2 +-
 .../ir/delete_quant_dequant_op_pass.cc | 2 +-
 .../delete_weight_dequant_linear_op_pass.cc | 4 +-
 .../ir/embedding_fc_lstm_fuse_pass.cc | 4 +-
 .../ir/fc_elementwise_layernorm_fuse_pass.cc | 2 +-
 paddle/fluid/framework/ir/fc_fuse_pass.cc | 6 +-
 .../framework/ir/fuse_elewise_add_act_pass.cc | 2 +-
 .../framework/ir/fuse_gemm_epilogue_pass.cc | 6 +-
 .../fuse_adam_op_pass.cc | 74 ++++-----
 .../fuse_momentum_op_pass.cc | 32 ++--
 .../fuse_sgd_op_pass.cc | 2 +-
 paddle/fluid/framework/ir/fuse_pass_base.cc | 4 +-
 .../ir/fusion_group/code_generator_helper.cc | 10 +-
 .../ir/fusion_group/fusion_group_pass.cc | 2 +-
 .../ir/gpu_cpu_map_matmul_to_mul_pass.cc | 34 ++--
 .../framework/ir/graph_pattern_detector.cc | 2 +-
 .../framework/ir/graph_pattern_detector.h | 2 +-
 paddle/fluid/framework/ir/graph_viz_pass.cc | 2 +-
 .../framework/ir/ipu/delete_scale_op_pass.cc | 4 +-
 .../ir/ipu/forward_graph_extract_pass.cc | 2 +-
 .../ir/ipu/inference_dtype_transfer_pass.cc | 3 +-
 .../ir/ipu/inference_process_pass.cc | 4 +-
 .../ir/ipu/optimizer_extract_pass.cc | 62 +++----
 .../ir/ipu/optimizer_state_align_pass.cc | 4 +-
 .../fluid/framework/ir/is_test_pass_tester.cc | 4 +-
 .../framework/ir/layer_norm_fuse_pass.cc | 14 +-
 .../framework/ir/lock_free_optimize_pass.cc | 2 +-
 .../framework/ir/matmul_scale_fuse_pass.cc | 10 +-
 ...est_reference_count_pass_last_lived_ops.cc | 12 +-
 .../ir/mkldnn/batch_norm_act_fuse_pass.cc | 4 +-
 .../mkldnn/batch_norm_act_fuse_pass_tester.cc | 6 +-
 .../compute_propagate_scales_mkldnn_pass.cc | 2 +-
 .../conv_activation_mkldnn_fuse_pass.cc | 7 +-
 ...conv_activation_mkldnn_fuse_pass_tester.cc | 4 +-
 .../conv_bias_mkldnn_fuse_pass_tester.cc | 4 +-
 ...onv_concat_relu_mkldnn_fuse_pass_tester.cc | 2 +-
 .../framework/ir/mkldnn/cpu_quantize_pass.cc | 4 +-
 .../ir/mkldnn/cpu_quantize_pass_tester.cc | 9 +-
 .../ir/mkldnn/cpu_quantize_squash_pass.cc | 14 +-
 .../mkldnn/cpu_quantize_squash_pass_tester.cc | 8 +-
 .../depthwise_conv_mkldnn_pass_tester.cc |
4 +- .../ir/mkldnn/elt_act_mkldnn_fuse_pass.cc | 4 +- .../ir/mkldnn/fc_act_mkldnn_fuse_pass.cc | 4 +- .../mkldnn/fc_act_mkldnn_fuse_pass_tester.cc | 28 ++-- .../matmul_activation_mkldnn_fuse_pass.cc | 7 +- .../matmul_transpose_reshape_fuse_pass.cc | 4 +- .../ir/mkldnn/mkldnn_inplace_pass.cc | 2 +- .../framework/ir/mkldnn/mkldnn_pass_util.h | 2 +- .../ir/mkldnn/mkldnn_placement_pass_tester.cc | 2 +- .../ir/mkldnn/multi_gru_seq_fuse_pass.cc | 4 +- .../ir/mkldnn/quant_dequant_mkldnn_pass.cc | 6 +- .../shuffle_channel_mkldnn_detect_pass.cc | 6 +- ...uffle_channel_mkldnn_detect_pass_tester.cc | 2 +- .../softplus_activation_mkldnn_fuse_pass.cc | 4 +- ...plus_activation_mkldnn_fuse_pass_tester.cc | 28 ++-- .../framework/ir/multi_batch_merge_pass.cc | 4 +- .../multi_devices_graph_pass.cc | 22 +-- .../ir/multihead_matmul_fuse_pass.cc | 28 ++-- .../framework/ir/op_compat_sensible_pass.cc | 12 +- .../framework/ir/op_compat_sensible_pass.h | 12 +- .../ir/quant_conv2d_dequant_fuse_pass.cc | 8 +- .../ir/remove_padding_recover_padding_pass.cc | 2 +- .../ir/repeated_fc_relu_fuse_pass.cc | 4 +- .../framework/ir/seqpool_concat_fuse_pass.cc | 2 +- .../ir/shuffle_channel_detect_pass.cc | 6 +- .../ir/simplify_with_basic_ops_pass.cc | 16 +- .../ir/sync_batch_norm_pass_tester.cc | 2 +- .../ir/transpose_flatten_concat_fuse_pass.cc | 14 +- .../ir/trt_map_matmul_to_mul_pass.cc | 30 ++-- .../ir/trt_multihead_matmul_fuse_pass.cc | 28 ++-- .../ir/unsqueeze2_eltwise_fuse_pass.cc | 4 +- .../new_executor/standalone_executor_test.cc | 8 +- .../no_need_buffer_vars_inference_test.cc | 4 +- paddle/fluid/framework/op_call_stack.cc | 2 +- paddle/fluid/framework/op_desc.cc | 12 +- paddle/fluid/framework/op_desc.h | 2 +- paddle/fluid/framework/operator.cc | 48 +++--- paddle/fluid/framework/operator.h | 4 +- paddle/fluid/framework/operator_test.cc | 2 +- .../framework/paddle2cinn/build_cinn_pass.cc | 2 +- .../paddle2cinn/build_cinn_pass_test.cc | 2 +- .../paddle2cinn/cinn_compiler_test.cc | 6 +- paddle/fluid/framework/parallel_executor.cc | 4 +- paddle/fluid/framework/program_desc.cc | 6 +- paddle/fluid/framework/program_processing.cc | 2 +- paddle/fluid/framework/prune.cc | 2 +- paddle/fluid/framework/tuple_test.cc | 6 +- paddle/fluid/framework/var_desc.cc | 2 +- paddle/fluid/imperative/dygraph_grad_maker.h | 2 +- paddle/fluid/imperative/layout_autotune.cc | 7 +- paddle/fluid/imperative/layout_transformer.h | 26 +-- paddle/fluid/imperative/op_base.h | 2 +- paddle/fluid/imperative/prepared_operator.h | 50 +++--- paddle/fluid/imperative/tests/test_layer.cc | 2 +- .../analysis/ir_passes/subgraph_util.cc | 4 +- .../fluid/inference/api/analysis_predictor.cc | 16 +- paddle/fluid/inference/api/api_impl.cc | 10 +- paddle/fluid/inference/api/api_impl_tester.cc | 8 +- .../tensorrt/convert/activation_op.cc | 31 ++-- .../tensorrt/convert/anchor_generator_op.cc | 10 +- .../inference/tensorrt/convert/arg_max_op.cc | 4 +- .../tensorrt/convert/batch_norm_op.cc | 2 +- .../tensorrt/convert/bilinear_interp_v2_op.cc | 14 +- .../tensorrt/convert/c_allreduce_op.cc | 4 +- .../inference/tensorrt/convert/cast_op.cc | 2 +- .../inference/tensorrt/convert/clip_op.cc | 4 +- .../inference/tensorrt/convert/concat_op.cc | 2 +- .../inference/tensorrt/convert/conv2d_op.cc | 12 +- .../inference/tensorrt/convert/conv3d_op.cc | 12 +- .../tensorrt/convert/deformable_conv_op.cc | 12 +- .../inference/tensorrt/convert/dropout_op.cc | 4 +- .../tensorrt/convert/elementwise_op.cc | 2 +- .../tensorrt/convert/emb_eltwise_layernorm.cc | 4 +- 
.../inference/tensorrt/convert/equal_op.cc | 2 +- .../fluid/inference/tensorrt/convert/fc_op.cc | 22 +-- .../convert/flatten_contiguous_range_op.cc | 4 +- .../tensorrt/convert/fused_token_prune_op.cc | 9 +- .../inference/tensorrt/convert/gather_op.cc | 2 +- .../inference/tensorrt/convert/gelu_op.cc | 2 +- .../tensorrt/convert/group_norm_op.cc | 4 +- .../tensorrt/convert/hard_sigmoid_op.cc | 4 +- .../tensorrt/convert/hard_swish_op.cc | 11 +- .../tensorrt/convert/instance_norm_op.cc | 2 +- .../tensorrt/convert/layer_norm_op.cc | 4 +- .../tensorrt/convert/leaky_relu_op.cc | 4 +- .../inference/tensorrt/convert/matmul_op.cc | 8 +- .../inference/tensorrt/convert/mish_op.cc | 2 +- .../tensorrt/convert/multiclass_nms3_op.cc | 12 +- .../tensorrt/convert/multiclass_nms_op.cc | 12 +- .../tensorrt/convert/multihead_matmul_op.cc | 16 +- .../tensorrt/convert/nearest_interp_op.cc | 12 +- .../tensorrt/convert/nearest_interp_v2_op.cc | 12 +- .../inference/tensorrt/convert/op_converter.h | 8 +- .../inference/tensorrt/convert/pad_op.cc | 2 +- .../inference/tensorrt/convert/pool2d_op.cc | 20 +-- .../inference/tensorrt/convert/pool3d_op.cc | 20 +-- .../convert/preln_emb_eltwise_layernorm.cc | 4 +- .../tensorrt/convert/preln_residual_bias.cc | 2 +- .../inference/tensorrt/convert/prelu_op.cc | 4 +- .../tensorrt/convert/recover_padding_op.cc | 2 +- .../inference/tensorrt/convert/reduce_op.cc | 6 +- .../tensorrt/convert/remove_padding_op.cc | 2 +- .../inference/tensorrt/convert/reshape_op.cc | 2 +- .../tensorrt/convert/roi_align_op.cc | 10 +- .../inference/tensorrt/convert/roll_op.cc | 4 +- .../inference/tensorrt/convert/scale_op.cc | 6 +- .../tensorrt/convert/shuffle_channel_op.cc | 2 +- .../tensorrt/convert/skip_layernorm.cc | 2 +- .../inference/tensorrt/convert/slice_op.cc | 10 +- .../inference/tensorrt/convert/softmax_op.cc | 2 +- .../tensorrt/convert/sparse_fc_op.cc | 22 +-- .../convert/sparse_multihead_matmul_op.cc | 16 +- .../inference/tensorrt/convert/split_op.cc | 6 +- .../inference/tensorrt/convert/squeeze2_op.cc | 2 +- .../inference/tensorrt/convert/stack_op.cc | 4 +- .../tensorrt/convert/strided_slice_op.cc | 8 +- .../inference/tensorrt/convert/swish_op.cc | 2 +- .../inference/tensorrt/convert/tile_op.cc | 2 +- .../inference/tensorrt/convert/top_k_op.cc | 13 +- .../tensorrt/convert/transpose_op.cc | 2 +- .../tensorrt/convert/unsqueeze2_op.cc | 2 +- .../tensorrt/convert/yolo_box_head_op.cc | 4 +- .../inference/tensorrt/convert/yolo_box_op.cc | 16 +- paddle/fluid/inference/tensorrt/op_teller.cc | 153 +++++++++--------- paddle/fluid/operators/activation_op.cc | 2 +- paddle/fluid/operators/batch_norm_op.cc | 6 +- paddle/fluid/operators/benchmark/op_tester.cc | 11 +- .../collective/c_allreduce_sum_op.cc | 2 +- .../fluid/operators/controlflow/fetch_op.cc | 6 +- .../operators/controlflow/fetch_v2_op.cc | 4 +- .../fluid/operators/controlflow/op_variant.h | 2 +- .../controlflow/recurrent_op_helper.cc | 4 +- .../fluid/operators/controlflow/while_op.cc | 4 +- paddle/fluid/operators/cum_op.cc | 16 +- .../detection/collect_fpn_proposals_op.cc | 4 +- paddle/fluid/operators/empty_op.cc | 2 +- paddle/fluid/operators/eye_op.cc | 2 +- paddle/fluid/operators/fill_any_like_op.cc | 2 +- paddle/fluid/operators/fill_constant_op.cc | 2 +- paddle/fluid/operators/fill_op.cc | 2 +- .../operators/fused/fused_attention_op.cc | 2 +- .../fused/fused_elemwise_activation_op.cc | 4 +- .../fused/fused_embedding_seq_pool_op.cc | 4 +- .../operators/fused/fused_feedforward_op.cc | 2 +- .../fused/fused_gate_attention_op.cc | 4 +- 
paddle/fluid/operators/graph_send_recv_op.cc | 6 +- .../operators/hierarchical_sigmoid_op.cc | 2 +- paddle/fluid/operators/im2sequence_op.cc | 2 +- paddle/fluid/operators/increment_op.cc | 2 +- paddle/fluid/operators/inplace_abn_op.cc | 2 +- paddle/fluid/operators/lod_reset_op.cc | 2 +- paddle/fluid/operators/lookup_table_op.cc | 2 +- paddle/fluid/operators/lookup_table_v2_op.cc | 2 +- .../fluid/operators/match_matrix_tensor_op.cc | 8 +- paddle/fluid/operators/nce_op.cc | 2 +- paddle/fluid/operators/prim_ops/add_p_op.cc | 6 +- .../operators/prim_ops/broadcast_p_op.cc | 4 +- .../fluid/operators/prim_ops/concat_p_op.cc | 6 +- paddle/fluid/operators/prim_ops/div_p_op.cc | 6 +- .../operators/prim_ops/fill_constant_p_op.cc | 4 +- .../fluid/operators/prim_ops/gather_p_op.cc | 6 +- .../fluid/operators/prim_ops/matmul_p_op.cc | 8 +- paddle/fluid/operators/prim_ops/mul_p_op.cc | 6 +- .../fluid/operators/prim_ops/reduce_p_op.cc | 4 +- .../fluid/operators/prim_ops/reshape_p_op.cc | 4 +- .../operators/prim_ops/scatter_add_p_op.cc | 8 +- .../operators/prim_ops/slice_assign_p_op.cc | 6 +- .../operators/prim_ops/slice_select_p_op.cc | 4 +- paddle/fluid/operators/prim_ops/split_p_op.cc | 6 +- paddle/fluid/operators/prim_ops/sqrt_p_op.cc | 4 +- paddle/fluid/operators/prim_ops/sub_p_op.cc | 6 +- paddle/fluid/operators/prim_ops/tanh_p_op.cc | 4 +- .../operators/prim_ops/transpose_p_op.cc | 4 +- paddle/fluid/operators/py_func_op.cc | 8 +- paddle/fluid/operators/randperm_op.cc | 4 +- .../reader/create_custom_reader_op.cc | 8 +- paddle/fluid/operators/reader/read_op.cc | 6 +- .../operators/reader/reader_op_registry.cc | 6 +- .../operators/reduce_ops/reduce_sum_op.cc | 2 +- paddle/fluid/operators/run_program_op.cc | 2 +- paddle/fluid/operators/segment_pool_op.cc | 2 +- .../sequence_ops/sequence_conv_op.cc | 4 +- .../sequence_ops/sequence_expand_as_op.cc | 4 +- .../sequence_ops/sequence_expand_op.cc | 4 +- .../operators/sequence_ops/sequence_pad_op.cc | 2 +- .../sequence_ops/sequence_pool_op.cc | 2 +- .../sequence_ops/sequence_scatter_op.cc | 4 +- paddle/fluid/operators/sync_batch_norm_op.cc | 2 +- paddle/fluid/operators/uniform_random_op.cc | 2 +- paddle/fluid/operators/var_conv_2d_op.cc | 6 +- .../fluid/platform/device/ipu/ipu_compiler.cc | 81 +++++----- .../popart_canonicalization/activation_ops.cc | 38 ++--- .../canonicalization_utils.cc | 2 +- .../elementwise_ops.cc | 2 +- .../ipu/popart_canonicalization/loss_ops.cc | 18 +-- .../ipu/popart_canonicalization/math_ops.cc | 34 ++-- .../ipu/popart_canonicalization/nn_ops.cc | 99 ++++++------ .../ipu/popart_canonicalization/other_ops.cc | 10 +- .../ipu/popart_canonicalization/reduce_ops.cc | 18 +-- .../ipu/popart_canonicalization/search_ops.cc | 12 +- .../ipu/popart_canonicalization/tensor_ops.cc | 105 ++++++------ .../platform/device/npu/npu_op_runner.cc | 26 +-- paddle/fluid/platform/enforce.h | 32 ++-- paddle/fluid/platform/enforce_test.cc | 8 +- paddle/fluid/platform/profiler.cc | 4 +- paddle/fluid/pybind/pybind.cc | 16 +- .../host_context/mlir_to_runtime_translate.h | 1 - tools/check_file_diff_approvals.sh | 6 - 276 files changed, 1318 insertions(+), 1288 deletions(-) diff --git a/paddle/fluid/distributed/fleet_executor/dist_model.cc b/paddle/fluid/distributed/fleet_executor/dist_model.cc index 3c4877181978b0..e641d6311c6ce0 100644 --- a/paddle/fluid/distributed/fleet_executor/dist_model.cc +++ b/paddle/fluid/distributed/fleet_executor/dist_model.cc @@ -416,7 +416,7 @@ bool DistModel::PrepareFeedAndFetch() { for (auto *op : program_->Block(0).AllOps()) { if 
(op->Type() == "feed") { VLOG(3) << "feed op with feed var: " << op->Output("Out")[0]; - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); if (feeds_.size() <= static_cast(idx)) { feeds_.resize(idx + 1); } @@ -446,7 +446,7 @@ bool DistModel::PrepareFeedAndFetch() { } } else if (op->Type() == "fetch") { VLOG(3) << "fetch op with fetch var: " << op->Input("X")[0]; - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); if (fetches_.size() <= static_cast(idx)) { fetches_.resize(idx + 1); } @@ -507,7 +507,7 @@ bool DistModel::FetchResults(std::vector *output_data, VLOG(3) << "DistModel is fetch results."; output_data->resize(fetches_.size()); for (size_t i = 0; i < fetches_.size(); ++i) { - int idx = BOOST_GET_CONST(int, fetches_[i]->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, fetches_[i]->GetAttr("col")); VLOG(3) << "Fetching data for [" << idx_to_fetches_[idx] << "]"; PADDLE_ENFORCE_EQ( static_cast(idx), @@ -518,7 +518,7 @@ bool DistModel::FetchResults(std::vector *output_data, i)); framework::FetchType &fetch_var = framework::GetFetchVariable(*scope, "fetch", idx); - auto &fetch = BOOST_GET(framework::LoDTensor, fetch_var); + auto &fetch = PADDLE_GET(framework::LoDTensor, fetch_var); auto type = framework::TransToProtoVarType(fetch.dtype()); auto output = &(output_data->at(i)); output->name = idx_to_fetches_[idx]; diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc index b058fa50acdd91..ea1bc2271c1948 100644 --- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc +++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_attention_fwd_func.cc @@ -398,7 +398,7 @@ fused_attention_dygraph_function( bool pre_layer_norm = false; if (attrs.count("pre_layer_norm")) { - pre_layer_norm = BOOST_GET_CONST(bool, attrs.at("pre_layer_norm")); + pre_layer_norm = PADDLE_GET_CONST(bool, attrs.at("pre_layer_norm")); } // Set Attributes diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc index e246649314b523..ad0602a17b7718 100644 --- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc +++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_feedforward_fwd_func.cc @@ -318,7 +318,7 @@ fused_feedforward_dygraph_function( bool pre_layer_norm = false; if (attrs.count("pre_layer_norm")) { - pre_layer_norm = BOOST_GET_CONST(bool, attrs.at("pre_layer_norm")); + pre_layer_norm = PADDLE_GET_CONST(bool, attrs.at("pre_layer_norm")); } // Set Attributes diff --git a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc index 81b4db4df207e2..eb48aa4cddffb6 100644 --- a/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc +++ b/paddle/fluid/eager/api/manual/fluid_manual/forwards/fused_gate_attention_fwd_func.cc @@ -303,12 +303,12 @@ fused_gate_attention_dygraph_function( bool merge_qkv = true; if (attrs.count("merge_qkv")) { - merge_qkv = BOOST_GET_CONST(bool, attrs.at("merge_qkv")); + merge_qkv = PADDLE_GET_CONST(bool, attrs.at("merge_qkv")); } bool has_gating = true; if (attrs.count("has_gating")) { - has_gating = BOOST_GET_CONST(bool, 
attrs.at("has_gating")); + has_gating = PADDLE_GET_CONST(bool, attrs.at("has_gating")); } // Set Attributes diff --git a/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_attention_node.cc b/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_attention_node.cc index 990cfb5226dbbb..11445880fe8b88 100644 --- a/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_attention_node.cc +++ b/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_attention_node.cc @@ -38,7 +38,7 @@ fused_attentionGradNodeCompat::operator()( bool pre_layer_norm = false; if (attr_map_.count("pre_layer_norm")) { - pre_layer_norm = BOOST_GET_CONST(bool, attr_map_.at("pre_layer_norm")); + pre_layer_norm = PADDLE_GET_CONST(bool, attr_map_.at("pre_layer_norm")); } std::map>> ins0 = diff --git a/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_feedforward_node.cc b/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_feedforward_node.cc index 5228cb3657825e..b5907f444c854a 100644 --- a/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_feedforward_node.cc +++ b/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_feedforward_node.cc @@ -40,7 +40,7 @@ fused_feedforwardGradNodeCompat::operator()( bool pre_layer_norm = false; if (attr_map_.count("pre_layer_norm")) { - pre_layer_norm = BOOST_GET_CONST(bool, attr_map_.at("pre_layer_norm")); + pre_layer_norm = PADDLE_GET_CONST(bool, attr_map_.at("pre_layer_norm")); } std::map>> ins0 = diff --git a/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_gate_attention_node.cc b/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_gate_attention_node.cc index a1ccaf09de8b42..f9911798b1ad02 100644 --- a/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_gate_attention_node.cc +++ b/paddle/fluid/eager/api/manual/fluid_manual/nodes/fused_gate_attention_node.cc @@ -40,12 +40,12 @@ fused_gate_attentionGradNodeCompat::operator()( bool merge_qkv = true; if (attr_map_.count("merge_qkv")) { - merge_qkv = BOOST_GET_CONST(bool, attr_map_.at("merge_qkv")); + merge_qkv = PADDLE_GET_CONST(bool, attr_map_.at("merge_qkv")); } bool has_gating = true; if (attr_map_.count("has_gating")) { - has_gating = BOOST_GET_CONST(bool, attr_map_.at("has_gating")); + has_gating = PADDLE_GET_CONST(bool, attr_map_.at("has_gating")); } std::map>> ins0 = diff --git a/paddle/fluid/eager/auto_code_generator/eager_generator.cc b/paddle/fluid/eager/auto_code_generator/eager_generator.cc index 54b40c72d02152..64e46e789b4062 100644 --- a/paddle/fluid/eager/auto_code_generator/eager_generator.cc +++ b/paddle/fluid/eager/auto_code_generator/eager_generator.cc @@ -352,7 +352,7 @@ static typename std::enable_if::type GetAttrValue( const framework::Attribute& attr) { std::string val = ""; val += "{"; - for (auto x : BOOST_GET_CONST(std::vector, attr)) { + for (auto x : PADDLE_GET_CONST(std::vector, attr)) { val += std::to_string(x) + ","; } if (val.size() > 1) val.pop_back(); @@ -363,7 +363,7 @@ static typename std::enable_if::type GetAttrValue( template static typename std::enable_if::type GetAttrValue( const framework::Attribute& attr) { - return std::to_string(BOOST_GET_CONST(T, attr)); + return std::to_string(PADDLE_GET_CONST(T, attr)); } static std::pair GetAttrType( @@ -385,7 +385,7 @@ static std::pair GetAttrType( case (3): { ret = "std::string"; if (is_arg) ret += "&"; - val = "\"" + BOOST_GET_CONST(std::string, attr) + "\""; + val = "\"" + PADDLE_GET_CONST(std::string, attr) + "\""; break; } case (4): { @@ -404,7 +404,7 @@ static std::pair GetAttrType( ret = "std::vector"; if (is_arg) 
ret += "&"; val += "{"; - for (auto x : BOOST_GET_CONST(std::vector, attr)) { + for (auto x : PADDLE_GET_CONST(std::vector, attr)) { val += "\"" + x + "\"" + ","; } if (val.size() > 1) val.pop_back(); diff --git a/paddle/fluid/eager/to_static/run_program_op_node.h b/paddle/fluid/eager/to_static/run_program_op_node.h index 2af2bd369b42ba..50f9d0b58ca32b 100644 --- a/paddle/fluid/eager/to_static/run_program_op_node.h +++ b/paddle/fluid/eager/to_static/run_program_op_node.h @@ -191,16 +191,16 @@ inline void RunProgramAPI( std::vector &dout, // NOLINT const paddle::framework::AttributeMap &attrs) { VLOG(2) << "RunProgramOpKernel Compute"; - auto start_op_index = BOOST_GET_CONST(int64_t, attrs.at("start_op_index")); - auto end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index")); + auto start_op_index = PADDLE_GET_CONST(int64_t, attrs.at("start_op_index")); + auto end_op_index = PADDLE_GET_CONST(int64_t, attrs.at("end_op_index")); // In the original run_program OP, the default value of the is_test // attribute is false, we should check if there is is_test parameter // in attrs auto is_test = false; if (attrs.count("is_test")) { - is_test = BOOST_GET_CONST(bool, attrs.at("is_test")); + is_test = PADDLE_GET_CONST(bool, attrs.at("is_test")); } - auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id")); + auto program_id = PADDLE_GET_CONST(int64_t, attrs.at("program_id")); // NOTE(chenweihang): In order not to add new variable type, use vector // here. Originally, here can use scope directly. @@ -226,8 +226,8 @@ inline void RunProgramAPI( details::ShareTensorsIntoScope(x, &scope); details::ShareTensorsIntoScope(params, &scope); - auto *global_block = - BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block")); + auto *global_block = PADDLE_GET_CONST(paddle::framework::BlockDesc *, + attrs.at("global_block")); const auto &place = egr::Controller::Instance().GetExpectedPlace(); if (end_op_index > start_op_index) { @@ -292,11 +292,11 @@ inline void RunProgramGradAPI( // if all output vars are set to stop_gradient, grad op no need to executed if (x_grad.empty() && params_grad.empty()) return; - auto *global_block = - BOOST_GET_CONST(paddle::framework::BlockDesc *, attrs.at("global_block")); - auto orig_end_op_index = BOOST_GET_CONST(int64_t, attrs.at("end_op_index")); + auto *global_block = PADDLE_GET_CONST(paddle::framework::BlockDesc *, + attrs.at("global_block")); + auto orig_end_op_index = PADDLE_GET_CONST(int64_t, attrs.at("end_op_index")); - auto program_id = BOOST_GET_CONST(int64_t, attrs.at("program_id")); + auto program_id = PADDLE_GET_CONST(int64_t, attrs.at("program_id")); // NOTE: skip `shape` and `fill_constant` op created by // fluid.backward.gradients, one forward output will generate one `shape` // and `fill_constant` diff --git a/paddle/fluid/framework/attribute.cc b/paddle/fluid/framework/attribute.cc index a2d0f2db2829d0..13f175ce0b1cd9 100644 --- a/paddle/fluid/framework/attribute.cc +++ b/paddle/fluid/framework/attribute.cc @@ -21,31 +21,31 @@ namespace framework { paddle::any GetAttrValue(const Attribute& attr) { switch (AttrTypeID(attr)) { case proto::AttrType::INT: - return BOOST_GET_CONST(int, attr); + return PADDLE_GET_CONST(int, attr); case proto::AttrType::FLOAT: - return BOOST_GET_CONST(float, attr); + return PADDLE_GET_CONST(float, attr); case proto::AttrType::STRING: - return BOOST_GET_CONST(std::string, attr); + return PADDLE_GET_CONST(std::string, attr); case proto::AttrType::INTS: - return BOOST_GET_CONST(std::vector, attr); + return 
PADDLE_GET_CONST(std::vector, attr); case proto::AttrType::FLOATS: - return BOOST_GET_CONST(std::vector, attr); + return PADDLE_GET_CONST(std::vector, attr); case proto::AttrType::STRINGS: - return BOOST_GET_CONST(std::vector, attr); + return PADDLE_GET_CONST(std::vector, attr); case proto::AttrType::BOOLEAN: - return BOOST_GET_CONST(bool, attr); + return PADDLE_GET_CONST(bool, attr); case proto::AttrType::BOOLEANS: - return BOOST_GET_CONST(std::vector, attr); + return PADDLE_GET_CONST(std::vector, attr); case proto::AttrType::LONG: - return BOOST_GET_CONST(int64_t, attr); + return PADDLE_GET_CONST(int64_t, attr); case proto::AttrType::LONGS: - return BOOST_GET_CONST(std::vector, attr); + return PADDLE_GET_CONST(std::vector, attr); case proto::AttrType::FLOAT64S: - return BOOST_GET_CONST(std::vector, attr); + return PADDLE_GET_CONST(std::vector, attr); case proto::AttrType::BLOCK: - return BOOST_GET_CONST(BlockDesc*, attr); + return PADDLE_GET_CONST(BlockDesc*, attr); case proto::AttrType::BLOCKS: - return BOOST_GET_CONST(std::vector, attr); + return PADDLE_GET_CONST(std::vector, attr); default: PADDLE_THROW(platform::errors::Unimplemented( "Unsupported Attribute value type `%s` for phi.", diff --git a/paddle/fluid/framework/attribute.h b/paddle/fluid/framework/attribute.h index f9cd5c73836622..a149c18f542e25 100644 --- a/paddle/fluid/framework/attribute.h +++ b/paddle/fluid/framework/attribute.h @@ -72,10 +72,10 @@ struct ExtractAttribute { bool* operator()(Attribute& attr) const { if (attr.type() == typeid(int)) { // NOLINT - int val = BOOST_GET_CONST(int, attr); + int val = PADDLE_GET_CONST(int, attr); attr = static_cast(val); } else if (attr.type() == typeid(float)) { // NOLINT - float val = BOOST_GET_CONST(float, attr); + float val = PADDLE_GET_CONST(float, attr); attr = static_cast(val); } bool* attr_value = nullptr; @@ -100,10 +100,10 @@ struct ExtractAttribute { int64_t* operator()(Attribute& attr) const { if (attr.type() == typeid(int)) { // NOLINT - int val = BOOST_GET_CONST(int, attr); + int val = PADDLE_GET_CONST(int, attr); attr = static_cast(val); } else if (attr.type() == typeid(float)) { // NOLINT - int val = BOOST_GET_CONST(float, attr); + int val = PADDLE_GET_CONST(float, attr); attr = static_cast(val); } int64_t* attr_value = nullptr; @@ -128,11 +128,11 @@ struct ExtractAttribute> { std::vector* operator()(Attribute& attr) const { if (attr.type() == typeid(std::vector)) { // NOLINT - std::vector val = BOOST_GET_CONST(std::vector, attr); + std::vector val = PADDLE_GET_CONST(std::vector, attr); std::vector vec(val.begin(), val.end()); attr = vec; } else if (attr.type() == typeid(std::vector)) { // NOLINT - std::vector val = BOOST_GET_CONST(std::vector, attr); + std::vector val = PADDLE_GET_CONST(std::vector, attr); std::vector vec(val.begin(), val.end()); attr = vec; } @@ -159,10 +159,10 @@ struct ExtractAttribute { float* operator()(Attribute& attr) const { if (attr.type() == typeid(int)) { // NOLINT - int val = BOOST_GET_CONST(int, attr); + int val = PADDLE_GET_CONST(int, attr); attr = static_cast(val); } else if (attr.type() == typeid(int64_t)) { // NOLINT - int64_t val = BOOST_GET_CONST(int64_t, attr); + int64_t val = PADDLE_GET_CONST(int64_t, attr); attr = static_cast(val); } float* attr_value = nullptr; @@ -187,11 +187,11 @@ struct ExtractAttribute> { std::vector* operator()(Attribute& attr) const { if (attr.type() == typeid(std::vector)) { // NOLINT - std::vector val = BOOST_GET_CONST(std::vector, attr); + std::vector val = PADDLE_GET_CONST(std::vector, attr); 
std::vector vec(val.begin(), val.end()); attr = vec; } else if (attr.type() == typeid(std::vector)) { // NOLINT - std::vector val = BOOST_GET_CONST(std::vector, attr); + std::vector val = PADDLE_GET_CONST(std::vector, attr); std::vector vec(val.begin(), val.end()); attr = vec; } diff --git a/paddle/fluid/framework/block_desc.cc b/paddle/fluid/framework/block_desc.cc index 9d62fd8100b083..84d52c996d0562 100644 --- a/paddle/fluid/framework/block_desc.cc +++ b/paddle/fluid/framework/block_desc.cc @@ -274,11 +274,12 @@ void BlockDesc::MoveFrom(BlockDesc *block) { const auto &attr_value = pair.second; auto attr_type = static_cast(attr_value.index() - 1); if (attr_type == proto::AttrType::BLOCK) { - auto block_id = BOOST_GET_CONST(BlockDesc *, attr_value)->ID(); + auto block_id = PADDLE_GET_CONST(BlockDesc *, attr_value)->ID(); dst_op->SetBlockAttr(attr_name, prog_->MutableBlock(block_id)); VLOG(10) << "Set block attr " << attr_name << " id " << block_id; } else if (attr_type == proto::AttrType::BLOCKS) { - auto old_blocks = BOOST_GET_CONST(std::vector, attr_value); + auto old_blocks = + PADDLE_GET_CONST(std::vector, attr_value); std::vector new_blocks; new_blocks.reserve(old_blocks.size()); for (auto *b : old_blocks) { diff --git a/paddle/fluid/framework/details/async_ssa_graph_executor.cc b/paddle/fluid/framework/details/async_ssa_graph_executor.cc index 0ae69695549e52..9e22033d864234 100644 --- a/paddle/fluid/framework/details/async_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/async_ssa_graph_executor.cc @@ -174,16 +174,16 @@ FetchResultType AsyncSSAGraphExecutor::Run( HandleException(); FetchList ret; - auto &val = BOOST_GET(FetchList, fetch_data); + auto &val = PADDLE_GET(FetchList, fetch_data); for (size_t fetch_idx = 0; fetch_idx < fetch_tensors.size(); ++fetch_idx) { if (data_is_lod_tensor(val.at(fetch_idx))) { std::vector lodtensor_ptrs; - lodtensor_ptrs.push_back(&(BOOST_GET(LoDTensor, val.at(fetch_idx)))); + lodtensor_ptrs.push_back(&(PADDLE_GET(LoDTensor, val.at(fetch_idx)))); LoDTensor var; MergeLoDTensor(&var, lodtensor_ptrs, platform::CPUPlace()); ret.emplace_back(var); } else { - auto array = BOOST_GET(LoDTensorArray, val.at(fetch_idx)); + auto array = PADDLE_GET(LoDTensorArray, val.at(fetch_idx)); LoDTensorArray item_array; item_array.reserve(array.size()); for (size_t i = 0; i < array.size(); ++i) { diff --git a/paddle/fluid/framework/details/fetch_async_op_handle.cc b/paddle/fluid/framework/details/fetch_async_op_handle.cc index a9e4bf826bc4b2..adf49c81c049a8 100644 --- a/paddle/fluid/framework/details/fetch_async_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_async_op_handle.cc @@ -228,7 +228,7 @@ void FetchAsyncOpHandle::RunImpl() { } if (return_merged_) { - auto &val = BOOST_GET(FetchList, *data_); + auto &val = PADDLE_GET(FetchList, *data_); if (src_vars[0]->IsType()) { // to lodtensor type std::vector src_lodtensors; @@ -263,7 +263,7 @@ void FetchAsyncOpHandle::RunImpl() { val.at(offset_) = std::move(dst_lodtensor_array); } } else { - auto &val = BOOST_GET(FetchUnmergedList, *data_); + auto &val = PADDLE_GET(FetchUnmergedList, *data_); auto &dst_tensors = val.at(offset_); dst_tensors.reserve(src_vars.size()); diff --git a/paddle/fluid/framework/details/fetch_op_handle.cc b/paddle/fluid/framework/details/fetch_op_handle.cc index a9f7de8ee312f9..411fbcd35571f9 100644 --- a/paddle/fluid/framework/details/fetch_op_handle.cc +++ b/paddle/fluid/framework/details/fetch_op_handle.cc @@ -74,22 +74,22 @@ static void CheckDims(const framework::DDim 
&tensor_dims, void FetchOpHandle::WaitAndMergeCPUFetchVars() const { if (return_merged_) { if (data_is_lod_tensor(tensors_[0])) { - const auto &tensor_dims = BOOST_GET_CONST(LoDTensor, tensors_[0]).dims(); + const auto &tensor_dims = PADDLE_GET_CONST(LoDTensor, tensors_[0]).dims(); for (size_t i = 1; i < tensors_.size(); i++) { - const auto &ele_dims = BOOST_GET_CONST(LoDTensor, tensors_[i]).dims(); + const auto &ele_dims = PADDLE_GET_CONST(LoDTensor, tensors_[i]).dims(); CheckDims(tensor_dims, ele_dims, offset_); } std::vector tensors_ptr; tensors_ptr.reserve(tensors_.size()); for (auto &t : tensors_) { - tensors_ptr.emplace_back(&BOOST_GET_CONST(LoDTensor, t)); + tensors_ptr.emplace_back(&PADDLE_GET_CONST(LoDTensor, t)); } - auto &val = BOOST_GET(FetchList, *data_); + auto &val = PADDLE_GET(FetchList, *data_); LoDTensor var; MergeLoDTensor(&var, tensors_ptr, platform::CPUPlace()); val.at(offset_) = std::move(var); } else { - auto &array = BOOST_GET_CONST(LoDTensorArray, tensors_[0]); + auto &array = PADDLE_GET_CONST(LoDTensorArray, tensors_[0]); LoDTensorArray tmp_array; tmp_array.reserve(array.size()); for (size_t i = 0; i < array.size(); ++i) { @@ -98,7 +98,7 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const { tensors_ptr.reserve(tensors_.size()); tensors_ptr.push_back(&array[i]); for (size_t j = 1; j < tensors_.size(); ++j) { - auto &element = BOOST_GET_CONST(LoDTensorArray, tensors_[j]); + auto &element = PADDLE_GET_CONST(LoDTensorArray, tensors_[j]); const auto &ele_dims = element[i].dims(); CheckDims(tensor_dims, ele_dims, offset_); tensors_ptr.push_back(&element[i]); @@ -106,11 +106,11 @@ void FetchOpHandle::WaitAndMergeCPUFetchVars() const { tmp_array.emplace_back(); MergeLoDTensor(&(tmp_array.back()), tensors_ptr, platform::CPUPlace()); } - auto &val = BOOST_GET(FetchList, *data_); + auto &val = PADDLE_GET(FetchList, *data_); val.at(offset_) = std::move(tmp_array); } } else { - auto &val = BOOST_GET(FetchUnmergedList, *data_); + auto &val = PADDLE_GET(FetchUnmergedList, *data_); val.at(offset_) = std::move(tensors_); } } @@ -151,13 +151,13 @@ void FetchOpHandle::RunImpl() { if (var->IsType()) { auto &t = var->Get(); - auto &item = BOOST_GET(LoDTensor, tensors_[i]); + auto &item = PADDLE_GET(LoDTensor, tensors_[i]); TransData(t, &item); } else { auto &t = var->Get(); LoDTensorArray tmp(t.size()); tensors_[i] = tmp; - auto &item = BOOST_GET(LoDTensorArray, tensors_[i]); + auto &item = PADDLE_GET(LoDTensorArray, tensors_[i]); for (size_t j = 0; j < t.size(); ++j) { TransData(t[j], &item[j]); } diff --git a/paddle/fluid/framework/details/multi_devices_helper.h b/paddle/fluid/framework/details/multi_devices_helper.h index b963e0858cdf34..a3a3b993b7ec14 100644 --- a/paddle/fluid/framework/details/multi_devices_helper.h +++ b/paddle/fluid/framework/details/multi_devices_helper.h @@ -88,7 +88,7 @@ inline bool IsOpRole(const OpDesc &op, OpRole role) { const auto &attrs = op.GetAttrMap(); auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleAttrName()); if (iter == attrs.end()) return false; - return static_cast(BOOST_GET_CONST(int, iter->second) & + return static_cast(PADDLE_GET_CONST(int, iter->second) & static_cast(role)); } @@ -96,7 +96,7 @@ inline std::vector GetOpRoleVarsOrEmpty(const OpDesc &op) { const auto &attrs = op.GetAttrMap(); auto iter = attrs.find(OpProtoAndCheckerMaker::OpRoleVarAttrName()); if (iter == attrs.end()) return {}; - auto &ret = BOOST_GET_CONST(std::vector, iter->second); + auto &ret = PADDLE_GET_CONST(std::vector, iter->second); PADDLE_ENFORCE_EQ( 
ret.size() % 2, 0, @@ -104,7 +104,7 @@ inline std::vector GetOpRoleVarsOrEmpty(const OpDesc &op) { "The size of attribute %s must be an even number, but got %d", OpProtoAndCheckerMaker::OpRoleVarAttrName(), ret.size())); - return BOOST_GET_CONST(std::vector, iter->second); + return PADDLE_GET_CONST(std::vector, iter->second); } bool IsDataParallelInferenceGraph(const ir::Graph &graph); diff --git a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc index bc870c0eaa18d9..3918c718e93497 100644 --- a/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc +++ b/paddle/fluid/framework/details/parallel_ssa_graph_executor.cc @@ -279,13 +279,13 @@ FetchResultType ParallelSSAGraphExecutor::Run( continue; } const auto &fetch_list = - BOOST_GET_CONST(FetchList, fetch_data[scope_idx]); + PADDLE_GET_CONST(FetchList, fetch_data[scope_idx]); if (data_is_lod_tensor(fetch_list[fetch_idx])) { lodtensor_ptrs.push_back( - &(BOOST_GET_CONST(LoDTensor, fetch_list[fetch_idx]))); + &(PADDLE_GET_CONST(LoDTensor, fetch_list[fetch_idx]))); } else { lodtensorarray_ptrs.push_back( - &(BOOST_GET_CONST(LoDTensorArray, fetch_list[fetch_idx]))); + &(PADDLE_GET_CONST(LoDTensorArray, fetch_list[fetch_idx]))); } } if (lodtensor_ptrs.size() != 0) { @@ -318,7 +318,7 @@ FetchResultType ParallelSSAGraphExecutor::Run( continue; } const auto &fetch_list = - BOOST_GET_CONST(FetchUnmergedList, fetch_data[scope_idx]); + PADDLE_GET_CONST(FetchUnmergedList, fetch_data[scope_idx]); PADDLE_ENFORCE_EQ( fetch_list[fetch_idx].size(), 1, diff --git a/paddle/fluid/framework/executor.cc b/paddle/fluid/framework/executor.cc index a7313b2659a0c7..7b73b330375ec8 100644 --- a/paddle/fluid/framework/executor.cc +++ b/paddle/fluid/framework/executor.cc @@ -645,7 +645,7 @@ void Executor::RunPreparedContext( for (auto* op : global_block.AllOps()) { if (op->Type() == kFeedOpType) { std::string feed_target_name = op->Output("Out")[0]; - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); SetFeedVariable( scope, *(*feed_targets)[feed_target_name], feed_holder_name, idx); } @@ -657,7 +657,7 @@ void Executor::RunPreparedContext( for (auto* op : global_block.AllOps()) { if (op->Type() == kFetchOpType) { std::string fetch_target_name = op->Input("X")[0]; - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); *(*fetch_targets)[fetch_target_name] = GetFetchVariable(*scope, fetch_holder_name, idx); } diff --git a/paddle/fluid/framework/feed_fetch_method.cc b/paddle/fluid/framework/feed_fetch_method.cc index 47bb60810eb487..edf64aca10700a 100644 --- a/paddle/fluid/framework/feed_fetch_method.cc +++ b/paddle/fluid/framework/feed_fetch_method.cc @@ -40,7 +40,7 @@ void SetFeedVariable(Scope* scope, feed_inputs.resize(index + 1); } // shared data with input tensor - auto& val = BOOST_GET(LoDTensor, feed_inputs[index]); + auto& val = PADDLE_GET(LoDTensor, feed_inputs[index]); val.ShareDataWith(input); // set lod val.set_lod(input.lod()); diff --git a/paddle/fluid/framework/grad_op_desc_maker.h b/paddle/fluid/framework/grad_op_desc_maker.h index db558ad6a5fe71..7f6fc4690b8774 100644 --- a/paddle/fluid/framework/grad_op_desc_maker.h +++ b/paddle/fluid/framework/grad_op_desc_maker.h @@ -173,7 +173,7 @@ class GradOpDescMakerBase { template inline const T& Attr(const std::string& name) const { - return BOOST_GET_CONST(T, GetAttr(name)); + return PADDLE_GET_CONST(T, GetAttr(name)); } 
std::string ForwardOpType() const { return this->fwd_op_.Type(); } diff --git a/paddle/fluid/framework/infershape_utils.cc b/paddle/fluid/framework/infershape_utils.cc index 1eedadebfc88a6..c525888ca116c9 100644 --- a/paddle/fluid/framework/infershape_utils.cc +++ b/paddle/fluid/framework/infershape_utils.cc @@ -129,17 +129,17 @@ class InferShapeArgumentMappingContext : public phi::ArgumentMappingContext { int64_t CompatMetaTensor::numel() const { if (is_runtime_) { - auto* var = BOOST_GET_CONST(Variable*, var_); + auto* var = PADDLE_GET_CONST(Variable*, var_); return var->Get().numel(); } else { - auto* var = BOOST_GET_CONST(VarDesc*, var_); + auto* var = PADDLE_GET_CONST(VarDesc*, var_); return var->ElementSize(); } } DDim CompatMetaTensor::dims() const { if (is_runtime_) { - auto* var = BOOST_GET_CONST(Variable*, var_); + auto* var = PADDLE_GET_CONST(Variable*, var_); if (var->IsType()) { return var->Get().dims(); } else if (var->IsType()) { @@ -154,7 +154,7 @@ DDim CompatMetaTensor::dims() const { "DenseTensorArray.")); } } else { - auto* var = BOOST_GET_CONST(VarDesc*, var_); + auto* var = PADDLE_GET_CONST(VarDesc*, var_); return var->GetShape().empty() ? phi::make_ddim({0UL}) : phi::make_ddim(var->GetShape()); @@ -163,7 +163,7 @@ DDim CompatMetaTensor::dims() const { phi::DataType CompatMetaTensor::dtype() const { if (is_runtime_) { - auto* var = BOOST_GET_CONST(Variable*, var_); + auto* var = PADDLE_GET_CONST(Variable*, var_); if (var->IsType()) { return var->Get().dtype(); } else if (var->IsType()) { @@ -177,14 +177,14 @@ phi::DataType CompatMetaTensor::dtype() const { "Currently, only can get dtype from DenseTensor or SelectedRows.")); } } else { - auto* var = BOOST_GET_CONST(VarDesc*, var_); + auto* var = PADDLE_GET_CONST(VarDesc*, var_); return paddle::framework::TransToPhiDataType(var->GetDataType()); } } DataLayout CompatMetaTensor::layout() const { if (is_runtime_) { - auto* var = BOOST_GET_CONST(Variable*, var_); + auto* var = PADDLE_GET_CONST(Variable*, var_); if (var->IsType()) { return var->Get().layout(); } else if (var->IsType()) { @@ -207,7 +207,7 @@ DataLayout CompatMetaTensor::layout() const { void CompatMetaTensor::set_dims(const DDim& dims) { if (is_runtime_) { - auto* var = BOOST_GET(Variable*, var_); + auto* var = PADDLE_GET(Variable*, var_); if (var->IsType()) { auto* tensor = var->GetMutable(); phi::DenseTensorUtils::GetMutableMeta(tensor)->dims = dims; @@ -230,14 +230,14 @@ void CompatMetaTensor::set_dims(const DDim& dims) { "Currently, only can set dims from DenseTensor or SelectedRows.")); } } else { - auto* var = BOOST_GET(VarDesc*, var_); + auto* var = PADDLE_GET(VarDesc*, var_); var->SetShape(vectorize(dims)); } } void CompatMetaTensor::set_dtype(phi::DataType dtype) { if (is_runtime_) { - auto* var = BOOST_GET(Variable*, var_); + auto* var = PADDLE_GET(Variable*, var_); if (var->IsType()) { auto* tensor = var->GetMutable(); phi::DenseTensorUtils::GetMutableMeta(tensor)->dtype = dtype; @@ -252,14 +252,14 @@ void CompatMetaTensor::set_dtype(phi::DataType dtype) { "Currently, only can set dtype from DenseTensor or SelectedRows.")); } } else { - auto* var = BOOST_GET(VarDesc*, var_); + auto* var = PADDLE_GET(VarDesc*, var_); var->SetDataType(paddle::framework::TransToProtoVarType(dtype)); } } void CompatMetaTensor::set_layout(DataLayout layout) { if (is_runtime_) { - auto* var = BOOST_GET(Variable*, var_); + auto* var = PADDLE_GET(Variable*, var_); if (var->IsType()) { auto* tensor = var->GetMutable(); phi::DenseTensorUtils::GetMutableMeta(tensor)->layout = 
layout; @@ -282,7 +282,7 @@ void CompatMetaTensor::set_layout(DataLayout layout) { void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) { if (is_runtime_) { - auto* var = BOOST_GET(Variable*, var_); + auto* var = PADDLE_GET(Variable*, var_); if (var->IsType()) { auto* tensor = var->GetMutable(); phi::DenseTensorUtils::GetMutableMeta(tensor)->lod = @@ -292,7 +292,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) { // only LoDTensor need to share lod } } else { - auto* var = BOOST_GET(VarDesc*, var_); + auto* var = PADDLE_GET(VarDesc*, var_); var->SetLoDLevel( static_cast(meta_tensor).GetCompileTimeLoD()); } @@ -301,7 +301,7 @@ void CompatMetaTensor::share_lod(const MetaTensor& meta_tensor) { void CompatMetaTensor::share_dims(const MetaTensor& meta_tensor) { set_dims(meta_tensor.dims()); if (is_runtime_) { - auto* var = BOOST_GET(Variable*, var_); + auto* var = PADDLE_GET(Variable*, var_); if (var->IsType()) { auto* selected_rows = var->GetMutable(); auto& input_selected_rows = @@ -461,15 +461,15 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, switch (AttrTypeID(attr)) { case framework::proto::AttrType::FLOAT: infer_meta_context.EmplaceBackAttr( - phi::Scalar(BOOST_GET_CONST(float, attr))); + phi::Scalar(PADDLE_GET_CONST(float, attr))); break; case framework::proto::AttrType::INT: infer_meta_context.EmplaceBackAttr( - phi::Scalar(BOOST_GET_CONST(int, attr))); + phi::Scalar(PADDLE_GET_CONST(int, attr))); break; case framework::proto::AttrType::STRING: infer_meta_context.EmplaceBackAttr( - phi::Scalar(BOOST_GET_CONST(std::string, attr))); + phi::Scalar(PADDLE_GET_CONST(std::string, attr))); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -481,7 +481,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, auto infershape_input = std::move(ctx->GetInputVarPtrs(attr_name)); if (infershape_input.size() == 1) { if (ctx->IsRuntime()) { - Variable* var = BOOST_GET_CONST(Variable*, infershape_input[0]); + Variable* var = PADDLE_GET_CONST(Variable*, infershape_input[0]); infer_meta_context.EmplaceBackAttr( std::move(experimental::MakePhiScalarFromVar(*var))); } else { @@ -507,15 +507,15 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, switch (AttrTypeID(attr)) { case framework::proto::AttrType::INTS: infer_meta_context.EmplaceBackAttr(std::move( - phi::IntArray(BOOST_GET_CONST(std::vector, attr)))); + phi::IntArray(PADDLE_GET_CONST(std::vector, attr)))); break; case framework::proto::AttrType::LONGS: infer_meta_context.EmplaceBackAttr(std::move( - phi::IntArray(BOOST_GET_CONST(std::vector, attr)))); + phi::IntArray(PADDLE_GET_CONST(std::vector, attr)))); break; case framework::proto::AttrType::INT: infer_meta_context.EmplaceBackAttr( - phi::IntArray({BOOST_GET_CONST(int, attr)})); + phi::IntArray({PADDLE_GET_CONST(int, attr)})); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -531,7 +531,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, std::vector vars; vars.reserve(infershape_inputs.size()); for (size_t i = 0; i < infershape_inputs.size(); i++) { - vars.push_back(BOOST_GET_CONST(Variable*, infershape_inputs[i])); + vars.push_back(PADDLE_GET_CONST(Variable*, infershape_inputs[i])); } if (infershape_inputs.size() != 1) { infer_meta_context.EmplaceBackAttr( @@ -545,7 +545,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, std::vector vars; vars.reserve(infershape_inputs.size()); for (size_t i = 0; i < infershape_inputs.size(); 
++i) { - vars.push_back(BOOST_GET_CONST(VarDesc*, infershape_inputs[i])); + vars.push_back(PADDLE_GET_CONST(VarDesc*, infershape_inputs[i])); } int64_t num_ele = 0; @@ -576,7 +576,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, auto& attr = *attr_ptr; switch (AttrTypeID(attr)) { case framework::proto::AttrType::INTS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -585,7 +585,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, infer_meta_context.EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::LONGS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -594,7 +594,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, infer_meta_context.EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::FLOATS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -603,7 +603,7 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, infer_meta_context.EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::FLOAT64S: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -626,41 +626,41 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, auto& attr = *attr_ptr; switch (attr_defs[i].type_index) { case phi::AttributeType::FLOAT32: - infer_meta_context.EmplaceBackAttr(BOOST_GET_CONST(float, attr)); + infer_meta_context.EmplaceBackAttr(PADDLE_GET_CONST(float, attr)); break; case phi::AttributeType::INT32: - infer_meta_context.EmplaceBackAttr(BOOST_GET_CONST(int, attr)); + infer_meta_context.EmplaceBackAttr(PADDLE_GET_CONST(int, attr)); break; case phi::AttributeType::BOOL: - infer_meta_context.EmplaceBackAttr(BOOST_GET_CONST(bool, attr)); + infer_meta_context.EmplaceBackAttr(PADDLE_GET_CONST(bool, attr)); break; case phi::AttributeType::INT64: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(int64_t, attr)); + PADDLE_GET_CONST(int64_t, attr)); break; case phi::AttributeType::INT32S: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case phi::AttributeType::DATA_TYPE: { auto data_type = paddle::framework::TransToPhiDataType( static_cast( - BOOST_GET_CONST(int, attr))); + PADDLE_GET_CONST(int, attr))); infer_meta_context.EmplaceBackAttr(data_type); } break; case phi::AttributeType::STRING: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::string, attr)); + PADDLE_GET_CONST(std::string, attr)); break; case phi::AttributeType::INT64S: switch (AttrTypeID(attr)) { case framework::proto::AttrType::LONGS: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case framework::proto::AttrType::INTS: { const auto& vector_int_attr = - BOOST_GET_CONST(std::vector, attr); + PADDLE_GET_CONST(std::vector, attr); const std::vector vector_int64_attr( vector_int_attr.begin(), 
vector_int_attr.end()); infer_meta_context.EmplaceBackAttr(vector_int64_attr); @@ -675,19 +675,19 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, break; case phi::AttributeType::FLOAT32S: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case phi::AttributeType::STRINGS: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case phi::AttributeType::BOOLS: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case phi::AttributeType::FLOAT64S: infer_meta_context.EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -714,12 +714,12 @@ CompatInferMetaContext BuildInferMetaContext(InferShapeContext* ctx, outputs; for (const auto& out : output_var) { if (ctx->IsRuntime()) { - if (BOOST_GET_CONST(Variable*, out)) { + if (PADDLE_GET_CONST(Variable*, out)) { outputs.emplace_back( std::move(CompatMetaTensor(out, ctx->IsRuntime()))); continue; } - } else if (BOOST_GET_CONST(VarDesc*, out)) { + } else if (PADDLE_GET_CONST(VarDesc*, out)) { outputs.emplace_back( std::move(CompatMetaTensor(out, ctx->IsRuntime()))); continue; diff --git a/paddle/fluid/framework/infershape_utils.h b/paddle/fluid/framework/infershape_utils.h index 0cc15b84344ffb..1f745e5bf9be06 100644 --- a/paddle/fluid/framework/infershape_utils.h +++ b/paddle/fluid/framework/infershape_utils.h @@ -67,12 +67,12 @@ class CompatMetaTensor : public phi::MetaTensor { private: const LoD& GetRuntimeLoD() const { - auto* var = BOOST_GET_CONST(Variable*, var_); + auto* var = PADDLE_GET_CONST(Variable*, var_); return var->Get().lod(); } int32_t GetCompileTimeLoD() const { - auto* var = BOOST_GET_CONST(VarDesc*, var_); + auto* var = PADDLE_GET_CONST(VarDesc*, var_); return var->GetLoDLevel(); } @@ -81,7 +81,7 @@ class CompatMetaTensor : public phi::MetaTensor { true, platform::errors::Unavailable( "Only can get Tensor from MetaTensor in rumtime.")); - auto* var = BOOST_GET_CONST(Variable*, var_); + auto* var = PADDLE_GET_CONST(Variable*, var_); PADDLE_ENFORCE_EQ(var->IsType(), true, platform::errors::Unavailable( diff --git a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc index 2625cb48174b88..aa11994fb8e443 100644 --- a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc +++ b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass.cc @@ -77,17 +77,17 @@ void AdaptivePool2dConvertGlobalPass::ApplyImpl(ir::Graph* graph) const { op->HasAttr("ksize")) { if (op->HasAttr("global_pooling")) { bool global_pooling = - BOOST_GET_CONST(bool, op->GetAttr("global_pooling")); + PADDLE_GET_CONST(bool, op->GetAttr("global_pooling")); if (global_pooling) continue; } if (!op->HasAttr("pooling_type")) continue; std::string type = - BOOST_GET_CONST(std::string, op->GetAttr("pooling_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("pooling_type")); // adaptive has no effect on max pooling if (type == "max") continue; - bool adaptive = BOOST_GET_CONST(bool, op->GetAttr("adaptive")); + bool adaptive = PADDLE_GET_CONST(bool, op->GetAttr("adaptive")); std::vector ksize = - BOOST_GET_CONST(std::vector, op->GetAttr("ksize")); + PADDLE_GET_CONST(std::vector, op->GetAttr("ksize")); if (adaptive && ksize.size() == 2 && ksize[0] == 1 && 
ksize[1] == 1) { op->SetAttr("adaptive", false); op->SetAttr("global_pooling", true); diff --git a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass_tester.cc b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass_tester.cc index da237288c54808..1725a3e16cd4af 100644 --- a/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass_tester.cc +++ b/paddle/fluid/framework/ir/adaptive_pool2d_convert_global_pass_tester.cc @@ -46,7 +46,7 @@ TEST(AdaptivePool2dConvertGlobalPass, basic) { if (node->IsOp() && node->Op()->Type() == "pool2d") { if (node->Op()->HasAttr("global_pooling")) { global_pooling = - BOOST_GET_CONST(bool, node->Op()->GetAttr("global_pooling")); + PADDLE_GET_CONST(bool, node->Op()->GetAttr("global_pooling")); } } } diff --git a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc index 48261b59293966..e9248028cde72f 100644 --- a/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/conv_bn_fuse_pass.cc @@ -308,7 +308,7 @@ void ConvBNFusePass::ApplyImpl(ir::Graph* graph) const { // update weights and biases float epsilon = - BOOST_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon")); + PADDLE_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon")); recompute_bias_and_weights(scope, conv_weight, *bn_scale, @@ -552,7 +552,7 @@ void ConvEltwiseAddBNFusePass::ApplyImpl(ir::Graph* graph) const { // update weights and biases float epsilon = - BOOST_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon")); + PADDLE_GET_CONST(float, batch_norm->Op()->GetAttr("epsilon")); // if bias is an input to other ops as well then we cannot overwrite it // so we create separate elementwise Y in nodes diff --git a/paddle/fluid/framework/ir/cudnn_placement_pass_tester.cc b/paddle/fluid/framework/ir/cudnn_placement_pass_tester.cc index 022e67349d7d77..10a5377e08f81f 100644 --- a/paddle/fluid/framework/ir/cudnn_placement_pass_tester.cc +++ b/paddle/fluid/framework/ir/cudnn_placement_pass_tester.cc @@ -92,7 +92,7 @@ class PlacementPassTest { if (node->IsOp() && node->Op()) { auto* op = node->Op(); if (op->HasAttr("use_cudnn") && - BOOST_GET_CONST(bool, op->GetAttr("use_cudnn"))) { + PADDLE_GET_CONST(bool, op->GetAttr("use_cudnn"))) { ++use_cudnn_true_count; } } diff --git a/paddle/fluid/framework/ir/delete_fill_constant_op_pass.cc b/paddle/fluid/framework/ir/delete_fill_constant_op_pass.cc index e4b6e43e5c3dce..cd5cbf150b3a33 100644 --- a/paddle/fluid/framework/ir/delete_fill_constant_op_pass.cc +++ b/paddle/fluid/framework/ir/delete_fill_constant_op_pass.cc @@ -63,9 +63,9 @@ void DeleteFillConstantOpPass::ApplyImpl(ir::Graph* graph) const { Node* fill_constant_out_node = subgraph.at(fill_constant_out); // Get fill_constant's attr auto fill_constant = fill_constant_op_node->Op(); - auto value = BOOST_GET_CONST(float, fill_constant->GetAttr("value")); + auto value = PADDLE_GET_CONST(float, fill_constant->GetAttr("value")); auto shape = - BOOST_GET_CONST(std::vector, fill_constant->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, fill_constant->GetAttr("shape")); auto* scope = param_scope(); auto fill_constant_out_desc = fill_constant_out_node->Var(); fill_constant_out_desc->SetShape(shape); diff --git a/paddle/fluid/framework/ir/delete_quant_dequant_filter_op_pass.cc b/paddle/fluid/framework/ir/delete_quant_dequant_filter_op_pass.cc index 86639e4ff42466..2f95f476db5a02 100644 --- a/paddle/fluid/framework/ir/delete_quant_dequant_filter_op_pass.cc +++ 
b/paddle/fluid/framework/ir/delete_quant_dequant_filter_op_pass.cc @@ -96,7 +96,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const { } std::unordered_set nodes2rm = {}; int bit_length = - BOOST_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length")); + PADDLE_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length")); int range = ((1 << (bit_length - 1)) - 1); std::vector weight_scale; std::string quant_dequant_op_out_name = quant_dequant_op_out->Var()->Name(); @@ -133,7 +133,7 @@ void DeleteQuantDequantFilterOpPass::ApplyImpl(ir::Graph* graph) const { // Get weight scale if (dequant_type == "fake_channel_wise_quantize_dequantize_abs_max") { int quant_axis = - BOOST_GET_CONST(int, quant_dequant_op->Op()->GetAttr("quant_axis")); + PADDLE_GET_CONST(int, quant_dequant_op->Op()->GetAttr("quant_axis")); PADDLE_ENFORCE_EQ(quant_axis == 0 || quant_axis == 1, true, platform::errors::InvalidArgument( diff --git a/paddle/fluid/framework/ir/delete_quant_dequant_linear_op_pass.cc b/paddle/fluid/framework/ir/delete_quant_dequant_linear_op_pass.cc index 08e8aa3b360c66..fc0a30bed0efb4 100644 --- a/paddle/fluid/framework/ir/delete_quant_dequant_linear_op_pass.cc +++ b/paddle/fluid/framework/ir/delete_quant_dequant_linear_op_pass.cc @@ -113,7 +113,7 @@ void DeleteQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const { */ std::unordered_set nodes2rm = {}; int bit_length = - BOOST_GET_CONST(int, quantize_linear_op->Op()->GetAttr("bit_length")); + PADDLE_GET_CONST(int, quantize_linear_op->Op()->GetAttr("bit_length")); int range = ((1 << (bit_length - 1)) - 1); // Get input scale from tensor diff --git a/paddle/fluid/framework/ir/delete_quant_dequant_op_pass.cc b/paddle/fluid/framework/ir/delete_quant_dequant_op_pass.cc index e0d490ce836800..bae23b3cd66037 100644 --- a/paddle/fluid/framework/ir/delete_quant_dequant_op_pass.cc +++ b/paddle/fluid/framework/ir/delete_quant_dequant_op_pass.cc @@ -61,7 +61,7 @@ void DeleteQuantDequantOpPass::ApplyImpl(ir::Graph* graph) const { Node* input = subgraph.at(input_node); GET_NODES; int bit_length = - BOOST_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length")); + PADDLE_GET_CONST(int, quant_dequant_op->Op()->GetAttr("bit_length")); // Get input scale from tensor std::string input_scale_var_name = diff --git a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc index b37d7978a8e03b..ccd9cbce7be174 100644 --- a/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc +++ b/paddle/fluid/framework/ir/delete_weight_dequant_linear_op_pass.cc @@ -297,7 +297,7 @@ void DeleteWeightQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const { } */ std::unordered_set nodes2rm = {}; - int bit_length = BOOST_GET_CONST( + int bit_length = PADDLE_GET_CONST( int, weight_dequantize_linear_op->Op()->GetAttr("bit_length")); int range = ((1 << (bit_length - 1)) - 1); @@ -327,7 +327,7 @@ void DeleteWeightQuantDequantLinearOpPass::ApplyImpl(ir::Graph* graph) const { std::vector weight_data_tmp; weight_data_tmp.reserve(weight_tensor->numel()); - int quant_axis = BOOST_GET_CONST( + int quant_axis = PADDLE_GET_CONST( int, weight_dequantize_linear_op->Op()->GetAttr("quant_axis")); if (quant_axis == -1) { // per_layer quant_dequant: all OP PADDLE_ENFORCE_EQ(weight_scale_nums, diff --git a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc index a798be055f9a58..4fb1cb08ecabac 100644 --- 
a/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/embedding_fc_lstm_fuse_pass.cc @@ -230,9 +230,9 @@ static int BuildFusion(Graph* graph, // TODO(jczaja): Add support for is_sparse / is_distributed auto is_sparse = - BOOST_GET_CONST(bool, lookup_table->Op()->GetAttr("is_sparse")); + PADDLE_GET_CONST(bool, lookup_table->Op()->GetAttr("is_sparse")); auto is_distributed = - BOOST_GET_CONST(bool, lookup_table->Op()->GetAttr("is_distributed")); + PADDLE_GET_CONST(bool, lookup_table->Op()->GetAttr("is_distributed")); if (is_sparse == true || is_distributed == true) { return; diff --git a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc index 4fe403cc751668..417d79972806fc 100644 --- a/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_elementwise_layernorm_fuse_pass.cc @@ -253,7 +253,7 @@ void FCElementwiseLayerNormFusePass::ApplyImpl(ir::Graph *graph) const { } int begin_norm_axis = - BOOST_GET_CONST(int, layer_norm->Op()->GetAttr("begin_norm_axis")); + PADDLE_GET_CONST(int, layer_norm->Op()->GetAttr("begin_norm_axis")); auto layer_norm_x_dims = fc_out->Var()->GetShape(); auto layer_norm_x_mat_dims = phi::flatten_to_2d(phi::make_ddim(layer_norm_x_dims), begin_norm_axis); diff --git a/paddle/fluid/framework/ir/fc_fuse_pass.cc b/paddle/fluid/framework/ir/fc_fuse_pass.cc index f7a8ea407c02ff..a71f6ac94b415b 100644 --- a/paddle/fluid/framework/ir/fc_fuse_pass.cc +++ b/paddle/fluid/framework/ir/fc_fuse_pass.cc @@ -139,8 +139,8 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const { // axis of elementwise_add should be -1 or x_num_col_dims auto x_num_col_dims = - BOOST_GET_CONST(int, mul->Op()->GetAttr("x_num_col_dims")); - auto axis = BOOST_GET_CONST(int, elementwise_add->Op()->GetAttr("axis")); + PADDLE_GET_CONST(int, mul->Op()->GetAttr("x_num_col_dims")); + auto axis = PADDLE_GET_CONST(int, elementwise_add->Op()->GetAttr("axis")); if (axis != -1 && axis != x_num_col_dims) return; // Shape of bias should be [1, out_size] or [out_size] @@ -263,7 +263,7 @@ int FCFusePass::ApplyFCPattern(Graph* graph, bool with_relu) const { elementwise_add_op_desc->GetNullableAttr("out_threshold"); if (out_threshold_attr.index()) { VLOG(4) << "setting out_threshold: " - << BOOST_GET_CONST(float, out_threshold_attr); + << PADDLE_GET_CONST(float, out_threshold_attr); desc.SetAttr("out_threshold", out_threshold_attr); } desc.Flush(); diff --git a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc index fd51eeef00d9aa..5bd26e9eb9f2d6 100644 --- a/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc +++ b/paddle/fluid/framework/ir/fuse_elewise_add_act_pass.cc @@ -255,7 +255,7 @@ void FuseElewiseAddActPass::RemoveIntermediateOut(Graph *graph) const { for (auto &cur_node : graph->Nodes()) { if (cur_node->IsVar()) continue; if (cur_node->Name() == "fused_elemwise_add_activation") { - bool save_intermediate_out = BOOST_GET_CONST( + bool save_intermediate_out = PADDLE_GET_CONST( bool, cur_node->Op()->GetAttr("save_intermediate_out")); auto intermediate_out_args = cur_node->Op()->Output("IntermediateOut"); PADDLE_ENFORCE_EQ( diff --git a/paddle/fluid/framework/ir/fuse_gemm_epilogue_pass.cc b/paddle/fluid/framework/ir/fuse_gemm_epilogue_pass.cc index 740c3b39ab06b5..e98f2bb144edde 100644 --- a/paddle/fluid/framework/ir/fuse_gemm_epilogue_pass.cc +++ 
b/paddle/fluid/framework/ir/fuse_gemm_epilogue_pass.cc @@ -27,8 +27,8 @@ namespace ir { static void GetTransposeAttrsFromOp(const OpDesc &op, bool *trans_x, bool *trans_y) { - *trans_x = BOOST_GET_CONST(bool, op.GetAttr("trans_x")); - *trans_y = BOOST_GET_CONST(bool, op.GetAttr("trans_y")); + *trans_x = PADDLE_GET_CONST(bool, op.GetAttr("trans_x")); + *trans_y = PADDLE_GET_CONST(bool, op.GetAttr("trans_y")); } void FuseGemmEpiloguePass::ApplyImpl(ir::Graph *graph) const { @@ -492,7 +492,7 @@ bool FuseGemmEpiloguePass::IsGemmFromLinear_( "fused_transpose_Y"}) { if (matmul_v2_op->HasAttr(attr_name)) { std::vector tmp_vec = - BOOST_GET_CONST(std::vector, matmul_v2_op->GetAttr(attr_name)); + PADDLE_GET_CONST(std::vector, matmul_v2_op->GetAttr(attr_name)); if (tmp_vec.size() > 0) return false; } } diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc index e2f29c18ecfbbd..3a7a04450055df 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_adam_op_pass.cc @@ -111,69 +111,69 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { // Check attributions // NOTE: If new attribution is added, the following code maybe need change. - int op_role = BOOST_GET_CONST( + int op_role = PADDLE_GET_CONST( int, adam_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); - float beta1 = BOOST_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta1")); - float beta2 = BOOST_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta2")); + float beta1 = PADDLE_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta1")); + float beta2 = PADDLE_GET_CONST(float, adam_ops[0]->Op()->GetAttr("beta2")); float epsilon = - BOOST_GET_CONST(float, adam_ops[0]->Op()->GetAttr("epsilon")); + PADDLE_GET_CONST(float, adam_ops[0]->Op()->GetAttr("epsilon")); bool lazy_mode = - BOOST_GET_CONST(bool, adam_ops[0]->Op()->GetAttr("lazy_mode")); - int64_t min_row_size_to_use_multithread = BOOST_GET_CONST( + PADDLE_GET_CONST(bool, adam_ops[0]->Op()->GetAttr("lazy_mode")); + int64_t min_row_size_to_use_multithread = PADDLE_GET_CONST( int64_t, adam_ops[0]->Op()->GetAttr("min_row_size_to_use_multithread")); for (auto &adam_op : adam_ops) { PADDLE_ENFORCE_EQ( beta1, - BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1")), + PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta1")), platform::errors::PreconditionNotMet( "All adam Op's attr(beta1) must be same, but there are two " "different " "value: %f, %f.", beta1, - BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta1")))); + PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta1")))); PADDLE_ENFORCE_EQ( beta2, - BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2")), + PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta2")), platform::errors::PreconditionNotMet( "All adam Op's attr(beta2) must be same, but there are two " "different " "value: %f, %f.", beta2, - BOOST_GET_CONST(float, adam_op->Op()->GetAttr("beta2")))); + PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("beta2")))); PADDLE_ENFORCE_EQ( epsilon, - BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")), + PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")), platform::errors::PreconditionNotMet( "All adam Op's attr(epsilon) must be same, but there are two " "different " "value: %f, %f.", epsilon, - BOOST_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")))); + PADDLE_GET_CONST(float, adam_op->Op()->GetAttr("epsilon")))); 
PADDLE_ENFORCE_EQ( lazy_mode, - BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")), + PADDLE_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")), platform::errors::PreconditionNotMet( "All adam Op's attr(lazy_mode) must be same, but there are two " "different " "value: %d, %d.", lazy_mode, - BOOST_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")))); + PADDLE_GET_CONST(bool, adam_op->Op()->GetAttr("lazy_mode")))); PADDLE_ENFORCE_EQ( min_row_size_to_use_multithread, - BOOST_GET_CONST( + PADDLE_GET_CONST( int64_t, adam_op->Op()->GetAttr("min_row_size_to_use_multithread")), platform::errors::PreconditionNotMet( "All adam Op's attr(min_row_size_to_use_multithread) must be " "same, but there are two different value: %I64, %I64.", min_row_size_to_use_multithread, - BOOST_GET_CONST( + PADDLE_GET_CONST( int64_t, adam_op->Op()->GetAttr("min_row_size_to_use_multithread")))); PADDLE_ENFORCE_EQ( op_role, - BOOST_GET_CONST( + PADDLE_GET_CONST( int, adam_op->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())), platform::errors::PreconditionNotMet( @@ -181,9 +181,9 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { "different " "value: %d, %d.", op_role, - BOOST_GET_CONST(int, - adam_op->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())))); + PADDLE_GET_CONST(int, + adam_op->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())))); } // NOTE: fused_var is only exist in scope, so the graph doesn't have @@ -270,54 +270,54 @@ class FuseAdamOpPass : public FuseOptimizerOpPass { VLOG(6) << "The number of scale op is " << scale_ops.size() << "."; // Check attributions // NOTE: If new attribution is added, the following code maybe need change. - int op_role = BOOST_GET_CONST( + int op_role = PADDLE_GET_CONST( int, scale_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); - float scale = BOOST_GET_CONST(float, scale_ops[0]->Op()->GetAttr("scale")); - float bias = BOOST_GET_CONST(float, scale_ops[0]->Op()->GetAttr("bias")); + float scale = PADDLE_GET_CONST(float, scale_ops[0]->Op()->GetAttr("scale")); + float bias = PADDLE_GET_CONST(float, scale_ops[0]->Op()->GetAttr("bias")); bool bias_after_scale = - BOOST_GET_CONST(bool, scale_ops[0]->Op()->GetAttr("bias_after_scale")); + PADDLE_GET_CONST(bool, scale_ops[0]->Op()->GetAttr("bias_after_scale")); for (auto &scale_op : scale_ops) { PADDLE_ENFORCE_EQ( scale, - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")), + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")), platform::errors::PreconditionNotMet( "All scale Op's attr(scale) must be same, but there are two " "different " "value: %f, %f.", scale, - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")))); + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")))); PADDLE_ENFORCE_EQ( bias, - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")), + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")), platform::errors::PreconditionNotMet( "All scale Op's attr(bias) must be same, but there are two " "different " "value: %f, %f.", bias, - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")))); + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")))); PADDLE_ENFORCE_EQ( bias_after_scale, - BOOST_GET_CONST(bool, scale_op->Op()->GetAttr("bias_after_scale")), + PADDLE_GET_CONST(bool, scale_op->Op()->GetAttr("bias_after_scale")), platform::errors::PreconditionNotMet( "All scale Op's attr(bias_after_scale) must be same, but there " "are two different value: %d, %d.", bias_after_scale, - BOOST_GET_CONST(bool, - 
scale_op->Op()->GetAttr("bias_after_scale")))); + PADDLE_GET_CONST(bool, + scale_op->Op()->GetAttr("bias_after_scale")))); PADDLE_ENFORCE_EQ( op_role, - BOOST_GET_CONST(int, - scale_op->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())), + PADDLE_GET_CONST(int, + scale_op->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())), platform::errors::PreconditionNotMet( "All scale Op's attr(op_role) must be same, but there are two " "different " "value: %d, %d.", op_role, - BOOST_GET_CONST(int, - scale_op->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())))); + PADDLE_GET_CONST(int, + scale_op->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())))); } // NOTE: fused_var is only exist in scope, so the graph doesn't have diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc index 34b48fcc8c64e8..bab16feeb8b64c 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_momentum_op_pass.cc @@ -49,45 +49,45 @@ class FuseMomentumOpPass : public FuseOptimizerOpPass { // Check attributions // NOTE: If new attribution is added, the following code maybe need change. int op_role = - BOOST_GET_CONST(int, - momentum_ops[0]->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())); - float mu = BOOST_GET_CONST(float, momentum_ops[0]->Op()->GetAttr("mu")); + PADDLE_GET_CONST(int, + momentum_ops[0]->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())); + float mu = PADDLE_GET_CONST(float, momentum_ops[0]->Op()->GetAttr("mu")); bool use_nesterov = - BOOST_GET_CONST(bool, momentum_ops[0]->Op()->GetAttr("use_nesterov")); + PADDLE_GET_CONST(bool, momentum_ops[0]->Op()->GetAttr("use_nesterov")); for (auto &momentum_op : momentum_ops) { PADDLE_ENFORCE_EQ( mu, - BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu")), + PADDLE_GET_CONST(float, momentum_op->Op()->GetAttr("mu")), platform::errors::InvalidArgument( "All momentum Op's attr(mu) must be same, but there are two " "different " "value: %f, %f.", mu, - BOOST_GET_CONST(float, momentum_op->Op()->GetAttr("mu")))); + PADDLE_GET_CONST(float, momentum_op->Op()->GetAttr("mu")))); PADDLE_ENFORCE_EQ( use_nesterov, - BOOST_GET_CONST(bool, momentum_op->Op()->GetAttr("use_nesterov")), + PADDLE_GET_CONST(bool, momentum_op->Op()->GetAttr("use_nesterov")), platform::errors::InvalidArgument( "All momentum Op's attr(use_nesterov) must be same, but there " "are two different value: %d, %d.", use_nesterov, - BOOST_GET_CONST(bool, - momentum_op->Op()->GetAttr("use_nesterov")))); + PADDLE_GET_CONST(bool, + momentum_op->Op()->GetAttr("use_nesterov")))); PADDLE_ENFORCE_EQ( op_role, - BOOST_GET_CONST(int, - momentum_op->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())), + PADDLE_GET_CONST(int, + momentum_op->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())), platform::errors::InvalidArgument( "All momentum Op's attr(op_role) must be same, but there are two " "different " "value: %d, %d.", op_role, - BOOST_GET_CONST(int, - momentum_op->Op()->GetAttr( - OpProtoAndCheckerMaker::OpRoleAttrName())))); + PADDLE_GET_CONST(int, + momentum_op->Op()->GetAttr( + OpProtoAndCheckerMaker::OpRoleAttrName())))); } // NOTE: fused_var is only exist in scope, so the graph doesn't have diff --git a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc index 
f37345126dbfb3..e47b8248d631ef 100644 --- a/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc +++ b/paddle/fluid/framework/ir/fuse_optimizer_ops_pass/fuse_sgd_op_pass.cc @@ -48,7 +48,7 @@ class FuseSgdOpPass : public FuseOptimizerOpPass { // NOTE: fused_var is only exist in scope, so the graph doesn't have // fused_var node. - int op_role = BOOST_GET_CONST( + int op_role = PADDLE_GET_CONST( int, sgd_ops[0]->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); VLOG(6) << "Insert sgd to graph."; diff --git a/paddle/fluid/framework/ir/fuse_pass_base.cc b/paddle/fluid/framework/ir/fuse_pass_base.cc index a6ab6cf988c35b..ac9157d2222fea 100644 --- a/paddle/fluid/framework/ir/fuse_pass_base.cc +++ b/paddle/fluid/framework/ir/fuse_pass_base.cc @@ -63,9 +63,9 @@ FuseOptions FusePassBase::FindFuseOption(const Node& node1, const Node& node2) const { #ifdef PADDLE_WITH_MKLDNN bool node1_mkldnn = node1.Op()->HasAttr("use_mkldnn") && - BOOST_GET_CONST(bool, node1.Op()->GetAttr("use_mkldnn")); + PADDLE_GET_CONST(bool, node1.Op()->GetAttr("use_mkldnn")); bool node2_mkldnn = node2.Op()->HasAttr("use_mkldnn") && - BOOST_GET_CONST(bool, node2.Op()->GetAttr("use_mkldnn")); + PADDLE_GET_CONST(bool, node2.Op()->GetAttr("use_mkldnn")); if (node1_mkldnn && node2_mkldnn) return FUSE_MKLDNN; else if (!node1_mkldnn && !node2_mkldnn) diff --git a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc index 7d206236a42c70..8441b1e02f9bf2 100644 --- a/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc +++ b/paddle/fluid/framework/ir/fusion_group/code_generator_helper.cc @@ -80,26 +80,26 @@ static std::string RefineTemplateWithAttr(const std::string& op_type, proto::AttrType attr_type = static_cast(it->second.index() - 1); if (attr_type == proto::AttrType::BOOLEAN) { - bool result = BOOST_GET(bool, attr); + bool result = PADDLE_GET(bool, attr); if (result) { ret = "true"; } else { ret = "false"; } } else if (attr_type == proto::AttrType::INT) { - int result = BOOST_GET(int, attr); + int result = PADDLE_GET(int, attr); str_cvt << result; ret = str_cvt.str(); } else if (attr_type == proto::AttrType::LONG) { - int64_t result = BOOST_GET(int64_t, attr); + int64_t result = PADDLE_GET(int64_t, attr); str_cvt << result; ret = str_cvt.str(); } else if (attr_type == proto::AttrType::FLOAT) { - float result = BOOST_GET(float, attr); + float result = PADDLE_GET(float, attr); str_cvt << result; ret = str_cvt.str(); } else if (attr_type == proto::AttrType::STRING) { - std::string result = BOOST_GET(std::string, attr); + std::string result = PADDLE_GET(std::string, attr); ret = result; } } else { diff --git a/paddle/fluid/framework/ir/fusion_group/fusion_group_pass.cc b/paddle/fluid/framework/ir/fusion_group/fusion_group_pass.cc index 7a79dcbe2de27b..3ed66b576d8831 100644 --- a/paddle/fluid/framework/ir/fusion_group/fusion_group_pass.cc +++ b/paddle/fluid/framework/ir/fusion_group/fusion_group_pass.cc @@ -104,7 +104,7 @@ static int ExtractOpRole(fusion_group::SubGraph* subgraph) { for (auto* n : subgraph->Nodes()) { if (n && n->IsOp() && n->Op()) { if (n->Op()->HasAttr(attr_name)) { - op_roles.insert(BOOST_GET_CONST(int, n->Op()->GetAttr(attr_name))); + op_roles.insert(PADDLE_GET_CONST(int, n->Op()->GetAttr(attr_name))); } } } diff --git a/paddle/fluid/framework/ir/gpu_cpu_map_matmul_to_mul_pass.cc b/paddle/fluid/framework/ir/gpu_cpu_map_matmul_to_mul_pass.cc index 457f143116b334..6edd8c3e4de45a 100644 --- 
a/paddle/fluid/framework/ir/gpu_cpu_map_matmul_to_mul_pass.cc +++ b/paddle/fluid/framework/ir/gpu_cpu_map_matmul_to_mul_pass.cc @@ -272,10 +272,10 @@ void GpuCpuMapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const { bool flag = true; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); flag = flag && !transpose_X && !transpose_Y && std::abs(alpha - 1.0) < 1e-5; std::vector x_shape = matmul_in_x->Var()->GetShape(); @@ -346,9 +346,9 @@ void GpuCpuMapMatmulV2ToMulPass::ApplyImpl(ir::Graph* graph) const { bool flag = true; bool trans_x = - BOOST_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x")); + PADDLE_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x")); bool trans_y = - BOOST_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_y")); + PADDLE_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_y")); flag = flag && !trans_x && !trans_y; std::vector x_shape = matmul_v2_in_x->Var()->GetShape(); @@ -494,16 +494,16 @@ void GpuCpuSqueeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { size_t squeeze2_in_x_rank = (squeeze2_in_x->Var()->GetShape()).size(); std::vector squeeze2_op_axes = - BOOST_GET_CONST(std::vector, squeeze2_op->Op()->GetAttr("axes")); + PADDLE_GET_CONST(std::vector, squeeze2_op->Op()->GetAttr("axes")); flag = flag && squeeze2_in_x_rank == 4 && squeeze2_op_axes == std::vector{2, 3} && (matmul_in_x->outputs).size() == 1; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); flag = flag && !transpose_X && !transpose_Y && @@ -638,16 +638,16 @@ void GpuCpuReshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { auto reshape2_in_x_shape = reshape2_in_x->Var()->GetShape(); size_t reshape2_in_x_rank = reshape2_in_x_shape.size(); std::vector reshape2_op_shape = - BOOST_GET_CONST(std::vector, reshape2_op->Op()->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape2_op->Op()->GetAttr("shape")); flag = flag && reshape2_in_nums == 1 && reshape2_in_x_rank == 4 && reshape2_in_x_shape[2] == 1 && reshape2_in_x_shape[3] == 1 && reshape2_op_shape.size() == 2 && (matmul_in_x->outputs).size() == 1; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_y_rank = 
(matmul_in_y->Var()->GetShape()).size(); flag = flag && !transpose_X && !transpose_Y && @@ -720,7 +720,7 @@ void GpuCpuFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { auto flatten2_in_x_shape = flatten2_in_x->Var()->GetShape(); size_t flatten2_in_x_rank = flatten2_in_x_shape.size(); int flatten2_axis = - BOOST_GET_CONST(int, flatten2_op->Op()->GetAttr("axis")); + PADDLE_GET_CONST(int, flatten2_op->Op()->GetAttr("axis")); // only convert matmul to mul when the flatten2 has a single input // and the rank of input is 4 and the size of the output of matmul // is 1. @@ -729,10 +729,10 @@ void GpuCpuFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { (matmul_in_x->outputs).size() == 1; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); pattern_found = pattern_found && !transpose_X && !transpose_Y && diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.cc b/paddle/fluid/framework/ir/graph_pattern_detector.cc index b0792ee0812c96..6191c2efe9087c 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.cc +++ b/paddle/fluid/framework/ir/graph_pattern_detector.cc @@ -2799,7 +2799,7 @@ void patterns::ShuffleChannelPattern::operator()(PDNode *reshape1_in) { auto reshape1_op = pattern->NewNode(reshape1_op_repr())->assert_is_op("reshape2"); reshape1_op->assert_more([&](Node *x) { - return BOOST_GET_CONST(std::vector, x->Op()->GetAttr("shape")) + return PADDLE_GET_CONST(std::vector, x->Op()->GetAttr("shape")) .size() == 5; }); diff --git a/paddle/fluid/framework/ir/graph_pattern_detector.h b/paddle/fluid/framework/ir/graph_pattern_detector.h index 09dd426be2dafd..00e565b7161a2a 100644 --- a/paddle/fluid/framework/ir/graph_pattern_detector.h +++ b/paddle/fluid/framework/ir/graph_pattern_detector.h @@ -163,7 +163,7 @@ struct PDNode { PDNode* assert_op_attr(const std::string& attr_name, const T& attr) { asserts_.emplace_back([=](Node* x) { return x && x->IsOp() && x->Op()->HasAttr(attr_name) && - BOOST_GET_CONST(T, x->Op()->GetAttr(attr_name)) == attr; + PADDLE_GET_CONST(T, x->Op()->GetAttr(attr_name)) == attr; }); return this; } diff --git a/paddle/fluid/framework/ir/graph_viz_pass.cc b/paddle/fluid/framework/ir/graph_viz_pass.cc index 9097747e6a039c..eed4ab38f5be74 100644 --- a/paddle/fluid/framework/ir/graph_viz_pass.cc +++ b/paddle/fluid/framework/ir/graph_viz_pass.cc @@ -33,7 +33,7 @@ std::string FormatName(const Node* node) { !node->Op()->HasAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())) { return node->Name(); } - const std::string full_scope = BOOST_GET_CONST( + const std::string full_scope = PADDLE_GET_CONST( std::string, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpNamescopeAttrName())); return string::Sprintf("%s%s", full_scope.c_str(), node->Name().c_str()); diff --git a/paddle/fluid/framework/ir/ipu/delete_scale_op_pass.cc b/paddle/fluid/framework/ir/ipu/delete_scale_op_pass.cc index 753d13ded4c508..6300c0e32971db 100644 --- a/paddle/fluid/framework/ir/ipu/delete_scale_op_pass.cc +++ 
b/paddle/fluid/framework/ir/ipu/delete_scale_op_pass.cc @@ -45,8 +45,8 @@ void DeleteScaleOpPass::ApplyImpl(ir::Graph* graph) const { auto input_var_node = node->inputs[0]; auto output_var_node = node->outputs[0]; // only optimize scale *1 + 0 - auto scale = BOOST_GET_CONST(float, op->GetAttr("scale")); - auto bias = BOOST_GET_CONST(float, op->GetAttr("bias")); + auto scale = PADDLE_GET_CONST(float, op->GetAttr("scale")); + auto bias = PADDLE_GET_CONST(float, op->GetAttr("bias")); if (scale != 1 || bias != 0) { return; } diff --git a/paddle/fluid/framework/ir/ipu/forward_graph_extract_pass.cc b/paddle/fluid/framework/ir/ipu/forward_graph_extract_pass.cc index 0b9c7ebc47d21d..3d03ce88cbc9ad 100644 --- a/paddle/fluid/framework/ir/ipu/forward_graph_extract_pass.cc +++ b/paddle/fluid/framework/ir/ipu/forward_graph_extract_pass.cc @@ -36,7 +36,7 @@ void ForwardGraphExtractPass::ApplyImpl(ir::Graph* graph) const { if (!node->IsOp()) { continue; } - auto op_role = BOOST_GET_MUTABLE(int, node->Op()->GetAttr("op_role")); + auto op_role = PADDLE_GET_MUTABLE(int, node->Op()->GetAttr("op_role")); if (op_role == static_cast(OpRole::kForward)) { all_ops[OpRole::kForward].insert(node); } else if (op_role == static_cast(OpRole::kBackward)) { diff --git a/paddle/fluid/framework/ir/ipu/inference_dtype_transfer_pass.cc b/paddle/fluid/framework/ir/ipu/inference_dtype_transfer_pass.cc index f06f05e9f0242c..a0820afc2d8ee7 100644 --- a/paddle/fluid/framework/ir/ipu/inference_dtype_transfer_pass.cc +++ b/paddle/fluid/framework/ir/ipu/inference_dtype_transfer_pass.cc @@ -74,7 +74,8 @@ void InferenceDtypeTransferPass::ApplyImpl(ir::Graph* graph) const { auto* op_desc = node->Op(); if (op_desc->Type() == "popart_cast") { // Transfer the target dtype of cast Op - if (BOOST_GET_CONST(std::string, op_desc->GetAttr("to")) == "FLOAT") { + if (PADDLE_GET_CONST(std::string, op_desc->GetAttr("to")) == + "FLOAT") { op_desc->SetAttr("to", std::string("FLOAT16")); op_desc->Flush(); } diff --git a/paddle/fluid/framework/ir/ipu/inference_process_pass.cc b/paddle/fluid/framework/ir/ipu/inference_process_pass.cc index 1ef03b1bd9cfbd..55a4e320ea274e 100644 --- a/paddle/fluid/framework/ir/ipu/inference_process_pass.cc +++ b/paddle/fluid/framework/ir/ipu/inference_process_pass.cc @@ -112,12 +112,12 @@ void InferenceProcessPass::ApplyImpl(ir::Graph* graph) const { for (auto node : graph->Nodes()) { if (node->Name() == "feed") { if (node->IsOp()) { - feed_list[BOOST_GET_CONST(int, node->Op()->GetAttr("col"))] = + feed_list[PADDLE_GET_CONST(int, node->Op()->GetAttr("col"))] = node->outputs[0]->Name(); } } else if (node->Name() == "fetch") { if (node->IsOp()) { - fetch_list[BOOST_GET_CONST(int, node->Op()->GetAttr("col"))] = + fetch_list[PADDLE_GET_CONST(int, node->Op()->GetAttr("col"))] = node->inputs[0]->Name(); } } diff --git a/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc b/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc index b45a39aaa8680f..284d144bf75341 100644 --- a/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc +++ b/paddle/fluid/framework/ir/ipu/optimizer_extract_pass.cc @@ -76,7 +76,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { auto op = node->Op(); auto op_type = op->Type(); - int op_role_ = BOOST_GET_CONST( + int op_role_ = PADDLE_GET_CONST( int, op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); auto op_role = static_cast(op_role_); @@ -84,7 +84,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { // save weight decay value from every lamb optimizer 
op if (op_type == "lamb" && op->HasAttr("weight_decay")) { auto weight_decay_value = - BOOST_GET_CONST(float, op->GetAttr("weight_decay")); + PADDLE_GET_CONST(float, op->GetAttr("weight_decay")); auto params = op->Output("ParamOut"); weight_decay_vars.push_back(params[0]); weight_decay_values.push_back(weight_decay_value); @@ -95,7 +95,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } auto op_namescope = - BOOST_GET_CONST(std::string, op->GetAttr("op_namescope")); + PADDLE_GET_CONST(std::string, op->GetAttr("op_namescope")); bool is_grad_clip = is_grad_clip_op(op_namescope); // bool is_optimizer = is_optimizer_op(op_namescope); bool is_regularization = is_regularization_op(op_namescope); @@ -114,32 +114,33 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } else if (op_type == "momentum") { auto type = std::string{"sgd"}; // auto LearningRate = op->Input("LearningRate"); - auto use_nesterov = BOOST_GET_CONST(bool, op->GetAttr("use_nesterov")); + auto use_nesterov = PADDLE_GET_CONST(bool, op->GetAttr("use_nesterov")); PADDLE_ENFORCE_EQ(use_nesterov, false, platform::errors::Unimplemented( "ipu does not support nesterov mode.")); auto regularization_method = - BOOST_GET_CONST(std::string, op->GetAttr("regularization_method")); + PADDLE_GET_CONST(std::string, op->GetAttr("regularization_method")); PADDLE_ENFORCE_NE(regularization_method, "l1_decay", platform::errors::Unimplemented( "ipu does not support l1_decay mode.")); auto multi_precision = - BOOST_GET_CONST(bool, op->GetAttr("multi_precision")); + PADDLE_GET_CONST(bool, op->GetAttr("multi_precision")); PADDLE_ENFORCE_EQ(multi_precision, false, platform::errors::Unimplemented( "ipu does not support multi_precision mode.")); - auto rescale_grad = BOOST_GET_CONST(float, op->GetAttr("rescale_grad")); + auto rescale_grad = + PADDLE_GET_CONST(float, op->GetAttr("rescale_grad")); PADDLE_ENFORCE_EQ(rescale_grad, 1.0, platform::errors::Unimplemented( "ipu does not support rescale_grad mode.")); auto regularization_coeff = - BOOST_GET_CONST(float, op->GetAttr("regularization_coeff")); + PADDLE_GET_CONST(float, op->GetAttr("regularization_coeff")); auto lr_var = op->Input("LearningRate").front(); - auto momentum = BOOST_GET_CONST(float, op->GetAttr("mu")); + auto momentum = PADDLE_GET_CONST(float, op->GetAttr("mu")); new_op.SetAttr("type", type); new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("momentum", momentum); @@ -148,12 +149,12 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } else if (op_type == "adam" || op_type == "adamw") { auto type = std::string{"adam"}; auto lr_var = op->Input("LearningRate").front(); - auto beta1 = BOOST_GET_CONST(float, op->GetAttr("beta1")); - auto beta2 = BOOST_GET_CONST(float, op->GetAttr("beta2")); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); - auto lazy_mode = BOOST_GET_CONST(bool, op->GetAttr("lazy_mode")); + auto beta1 = PADDLE_GET_CONST(float, op->GetAttr("beta1")); + auto beta2 = PADDLE_GET_CONST(float, op->GetAttr("beta2")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); + auto lazy_mode = PADDLE_GET_CONST(bool, op->GetAttr("lazy_mode")); auto multi_precision = - BOOST_GET_CONST(bool, op->GetAttr("multi_precision")); + PADDLE_GET_CONST(bool, op->GetAttr("multi_precision")); PADDLE_ENFORCE_EQ(lazy_mode, false, platform::errors::Unimplemented( @@ -180,9 +181,9 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } else if (op_type == "adamax") { auto type = std::string{"adam"}; auto lr_var = 
op->Input("LearningRate").front(); - auto beta1 = BOOST_GET_CONST(float, op->GetAttr("beta1")); - auto beta2 = BOOST_GET_CONST(float, op->GetAttr("beta2")); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto beta1 = PADDLE_GET_CONST(float, op->GetAttr("beta1")); + auto beta2 = PADDLE_GET_CONST(float, op->GetAttr("beta2")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); new_op.SetAttr("type", type); new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("weight_decay", 0.0f); @@ -196,10 +197,11 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { // use decay mode auto type = std::string{"adam"}; auto lr_var = op->Input("LearningRate").front(); - auto weight_decay = BOOST_GET_CONST(float, op->GetAttr("weight_decay")); - auto beta1 = BOOST_GET_CONST(float, op->GetAttr("beta1")); - auto beta2 = BOOST_GET_CONST(float, op->GetAttr("beta2")); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto weight_decay = + PADDLE_GET_CONST(float, op->GetAttr("weight_decay")); + auto beta1 = PADDLE_GET_CONST(float, op->GetAttr("beta1")); + auto beta2 = PADDLE_GET_CONST(float, op->GetAttr("beta2")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); new_op.SetAttr("type", type); new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("weight_decay", weight_decay); @@ -212,8 +214,8 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } else if (op_type == "adadelta") { // NO LearningRate auto type = std::string{"adaptive"}; - auto rho = BOOST_GET_CONST(float, op->GetAttr("rho")); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto rho = PADDLE_GET_CONST(float, op->GetAttr("rho")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); new_op.SetAttr("type", type); new_op.SetAttr("weight_decay", 0.0f); new_op.SetAttr("alpha", rho); @@ -225,7 +227,7 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } else if (op_type == "adagrad") { auto type = std::string{"adaptive"}; auto lr_var = op->Input("LearningRate").front(); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); new_op.SetAttr("type", type); new_op.SetAttr("lr_var", lr_var); new_op.SetAttr("weight_decay", 0.0f); @@ -239,10 +241,10 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } else if (op_type == "rmsprop") { auto type = std::string{"adaptive"}; auto lr_var = op->Input("LearningRate").front(); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); - auto decay = BOOST_GET_CONST(float, op->GetAttr("decay")); - auto momentum = BOOST_GET_CONST(float, op->GetAttr("momentum")); - auto centered = BOOST_GET_CONST(bool, op->GetAttr("centered")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); + auto decay = PADDLE_GET_CONST(float, op->GetAttr("decay")); + auto momentum = PADDLE_GET_CONST(float, op->GetAttr("momentum")); + auto centered = PADDLE_GET_CONST(bool, op->GetAttr("centered")); new_op.SetAttr("type", type); new_op.SetAttr("weight_decay", 0.0f); new_op.SetAttr("alpha", decay); @@ -258,11 +260,11 @@ void IpuOptimizerExtractPass::ApplyImpl(ir::Graph* graph) const { } } else if (is_regularization && op_type == "scale") { // set weight_decay for L2Decay - auto scale = BOOST_GET_CONST(float, op->GetAttr("scale")); + auto scale = PADDLE_GET_CONST(float, op->GetAttr("scale")); new_op.SetAttr("weight_decay", scale); } else if (is_grad_clip && op_type == "fill_constant") { // set clip_norm for 
ClipGradByGlobalNorm - auto value = BOOST_GET_CONST(float, op->GetAttr("value")); + auto value = PADDLE_GET_CONST(float, op->GetAttr("value")); new_op.SetAttr("clip_norm", value); } else if (ignored_ops.count(op_type)) { VLOG(10) << "Ignore optimizer releated op: " << op_type; diff --git a/paddle/fluid/framework/ir/ipu/optimizer_state_align_pass.cc b/paddle/fluid/framework/ir/ipu/optimizer_state_align_pass.cc index 545c0575aba83d..f99c7930b73377 100644 --- a/paddle/fluid/framework/ir/ipu/optimizer_state_align_pass.cc +++ b/paddle/fluid/framework/ir/ipu/optimizer_state_align_pass.cc @@ -32,7 +32,7 @@ void IpuOptimizerStateAlignPass::ApplyImpl(ir::Graph* graph) const { for (auto* node : graph->Nodes()) { if (node->IsOp() && node->Op()) { - int op_role = BOOST_GET_CONST( + int op_role = PADDLE_GET_CONST( int, node->Op()->GetAttr( framework::OpProtoAndCheckerMaker::OpRoleAttrName())); @@ -42,7 +42,7 @@ void IpuOptimizerStateAlignPass::ApplyImpl(ir::Graph* graph) const { if (inputs.count(platform::ipu::sBeta1Pow)) { auto var = scope_->GetVar(inputs.at(platform::ipu::sBeta1Pow)[0]); auto data = var->GetMutable()->data(); - auto beta = BOOST_GET_CONST( + auto beta = PADDLE_GET_CONST( float, node->Op()->GetAttr(platform::ipu::sBeta1)); // ensure current save with beta1pow, rather than step. diff --git a/paddle/fluid/framework/ir/is_test_pass_tester.cc b/paddle/fluid/framework/ir/is_test_pass_tester.cc index c28f886a450609..f44ed42279d079 100644 --- a/paddle/fluid/framework/ir/is_test_pass_tester.cc +++ b/paddle/fluid/framework/ir/is_test_pass_tester.cc @@ -159,12 +159,12 @@ TEST(IsTestPass, basic) { for (auto* node : graph->Nodes()) { if (node->IsOp()) { auto* op = node->Op(); - auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); + auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name")); if (op_name == "conv3") { ASSERT_FALSE(op->HasAttr("is_test")); } else { ASSERT_TRUE(op->HasAttr("is_test")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("is_test"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("is_test"))); } } } diff --git a/paddle/fluid/framework/ir/layer_norm_fuse_pass.cc b/paddle/fluid/framework/ir/layer_norm_fuse_pass.cc index b7309a9d04f317..afc117856755c6 100644 --- a/paddle/fluid/framework/ir/layer_norm_fuse_pass.cc +++ b/paddle/fluid/framework/ir/layer_norm_fuse_pass.cc @@ -57,14 +57,14 @@ bool validateReduceOpAttrs(const Node* node, const auto* op = node->Op(); if (op->HasAttr("reduce_all")) { EXPECT_TRUE( - !BOOST_GET_CONST(bool, op->GetAttr("reduce_all")), + !PADDLE_GET_CONST(bool, op->GetAttr("reduce_all")), ::paddle::string::Sprintf( "The LayerNorm fusion %s" "reduction must have \'reduce_all\' attribute set to false.", name)); } if (op->HasAttr("dim")) { - auto dims = BOOST_GET_CONST(std::vector, op->GetAttr("dim")); + auto dims = PADDLE_GET_CONST(std::vector, op->GetAttr("dim")); if (dims.size() == x_shape.size()) return false; if (1 == dims.size() && -1 == dims.front()) return true; @@ -289,18 +289,18 @@ void LayerNormFusePass::ApplyImpl(Graph* graph) const { CHECK_TRUE(validateReduceOpAttrs(std_dev, x_shape, "std_dev mean"), "Validation of standard deviation node failed."); - bool keep_dim = BOOST_GET_CONST(bool, x_mean->Op()->GetAttr("keep_dim")); + bool keep_dim = PADDLE_GET_CONST(bool, x_mean->Op()->GetAttr("keep_dim")); std::vector mean_dim = - BOOST_GET_CONST(std::vector, x_mean->Op()->GetAttr("dim")); + PADDLE_GET_CONST(std::vector, x_mean->Op()->GetAttr("dim")); std::vector std_mean_dim = - BOOST_GET_CONST(std::vector, 
std_dev->Op()->GetAttr("dim")); + PADDLE_GET_CONST(std::vector, std_dev->Op()->GetAttr("dim")); if (mean_dim != std_mean_dim) { LOG(WARNING) << "The LayerNorm dim of all mean must be same"; return; } if (!keep_dim) { - int sub_axis = BOOST_GET_CONST(int, x_sub_mean->Op()->GetAttr("axis")); - int div_axis = BOOST_GET_CONST(int, division->Op()->GetAttr("axis")); + int sub_axis = PADDLE_GET_CONST(int, x_sub_mean->Op()->GetAttr("axis")); + int div_axis = PADDLE_GET_CONST(int, division->Op()->GetAttr("axis")); if (sub_axis != 0 || div_axis != 0) return; } diff --git a/paddle/fluid/framework/ir/lock_free_optimize_pass.cc b/paddle/fluid/framework/ir/lock_free_optimize_pass.cc index e0ddaf95a93ba3..d6f4460113f88a 100644 --- a/paddle/fluid/framework/ir/lock_free_optimize_pass.cc +++ b/paddle/fluid/framework/ir/lock_free_optimize_pass.cc @@ -200,7 +200,7 @@ ir::Node* LockFreeOptimizePass::CreateNewSGDNode( new_desc.SetInput("Grad", std::vector({grad_node->Name()})); new_desc.SetOutput("ParamOut", old_desc->Output("ParamOut")); - std::vector op_role_vars = BOOST_GET_CONST( + std::vector op_role_vars = PADDLE_GET_CONST( std::vector, new_desc.GetAttr(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName())); // replace the second op role var, because the grad name was diff --git a/paddle/fluid/framework/ir/matmul_scale_fuse_pass.cc b/paddle/fluid/framework/ir/matmul_scale_fuse_pass.cc index 1b5d4d3cc90a20..08adec160fe038 100644 --- a/paddle/fluid/framework/ir/matmul_scale_fuse_pass.cc +++ b/paddle/fluid/framework/ir/matmul_scale_fuse_pass.cc @@ -131,16 +131,16 @@ void MatmulScaleFusePass::ApplyImpl(ir::Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, matmul_scale_pattern); auto* scope = param_scope(); - float bias = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")); + float bias = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")); if (std::abs(bias) > 1e-5) return; if (!IsCompat(subgraph, g)) { LOG(WARNING) << "matmul_scale_fuse_pass in op compat failed."; return; } - float scale = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); + float scale = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")); float matmul_alpha = - BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); auto const& names = scale_op->Op()->InputNames(); bool has_scale_tensor = std::find(names.begin(), names.end(), "ScaleTensor") != names.end(); @@ -195,14 +195,14 @@ void MatmulV2ScaleFusePass::ApplyImpl(ir::Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, matmul_v2_scale_pattern); auto* scope = param_scope(); - float bias = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")); + float bias = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")); if (std::abs(bias) > 1e-5) return; if (!IsCompat(subgraph, g)) { LOG(WARNING) << "matmul_v2_scale_fuse_pass in op compat failed."; return; } - float scale = BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); + float scale = PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")); auto const& names = scale_op->Op()->InputNames(); bool has_scale_tensor = std::find(names.begin(), names.end(), "ScaleTensor") != names.end(); diff --git a/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc b/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc index 1180356b6072af..02934922f821e6 100644 --- 
a/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc +++ b/paddle/fluid/framework/ir/memory_optimize_pass/test_reference_count_pass_last_lived_ops.cc @@ -208,14 +208,14 @@ TEST(test_reference_count_pass, test_no_need_buffer_var_shrink) { for (auto use_cuda : use_cuda_list) { ReferenceCountPassTestHelper helper(program, use_cuda); ASSERT_TRUE(helper.IsLastLivedOps(x0, {"scale"})); - ASSERT_EQ( - BOOST_GET_CONST(float, helper.LastLivedOps(x0)[0]->Attrs().at("scale")), - 1.0f); + ASSERT_EQ(PADDLE_GET_CONST(float, + helper.LastLivedOps(x0)[0]->Attrs().at("scale")), + 1.0f); ASSERT_TRUE(helper.IsLastLivedOps(x1, {"scale"})); - ASSERT_EQ( - BOOST_GET_CONST(float, helper.LastLivedOps(x1)[0]->Attrs().at("scale")), - 3.0f); + ASSERT_EQ(PADDLE_GET_CONST(float, + helper.LastLivedOps(x1)[0]->Attrs().at("scale")), + 3.0f); ASSERT_TRUE(helper.IsLastLivedOps(x2, {"elementwise_mul"})); ASSERT_TRUE(helper.IsLastLivedOps(x3, {"elementwise_add_grad"})); diff --git a/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass.cc index c9658e1a8bad75..fa2bc4d374001c 100644 --- a/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass.cc @@ -112,7 +112,7 @@ void FuseBatchNormActOneDNNPass::FuseBatchNormAct( auto *bn_op = batch_norm->Op(); if (bn_op->HasAttr("trainable_statistics")) { PADDLE_ENFORCE( - !BOOST_GET_CONST(bool, bn_op->GetAttr("trainable_statistics")), + !PADDLE_GET_CONST(bool, bn_op->GetAttr("trainable_statistics")), platform::errors::PreconditionNotMet( "The BatchNorm+Act fusion may happen only when mean and variance " "are not calculated by current batch statistics.")); @@ -120,7 +120,7 @@ void FuseBatchNormActOneDNNPass::FuseBatchNormAct( if (bn_op->HasAttr("is_test")) { PADDLE_ENFORCE( - BOOST_GET_CONST(bool, bn_op->GetAttr("is_test")), + PADDLE_GET_CONST(bool, bn_op->GetAttr("is_test")), platform::errors::PreconditionNotMet( "The BatchNorm+Act fusion may happen only during inference.")); } diff --git a/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass_tester.cc index f646db555e83f6..9e989f343bb34a 100644 --- a/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/batch_norm_act_fuse_pass_tester.cc @@ -101,11 +101,11 @@ TEST(FuseBatchNormActOneDNNPass, FuseIsTest) { if (node->IsOp() && node->Op()->Type() == "batch_norm") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("fuse_with_relu")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("fuse_with_relu"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("fuse_with_relu"))); ASSERT_TRUE(op->HasAttr("trainable_statistics")); - EXPECT_FALSE(BOOST_GET_CONST(bool, op->GetAttr("trainable_statistics"))); + EXPECT_FALSE(PADDLE_GET_CONST(bool, op->GetAttr("trainable_statistics"))); } } } diff --git a/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass.cc index 99eaab49b7926f..394c1ae797e4c7 100644 --- a/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/compute_propagate_scales_mkldnn_pass.cc @@ -347,7 +347,7 @@ void 
ComputePropagateScalesMkldnnPass::UpdateScaleOpInScale( auto pair = iter->second; const auto tensor = pair.second; - const auto scale = BOOST_GET_CONST(float, op_node->Op()->GetAttr("scale")); + const auto scale = PADDLE_GET_CONST(float, op_node->Op()->GetAttr("scale")); Tensor tmp_tensor; tmp_tensor.Resize(tensor.dims()); auto* data = tmp_tensor.mutable_data(platform::CPUPlace()); diff --git a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc index bd07967757b8a9..5fe6eb50aad03a 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass.cc @@ -72,9 +72,10 @@ void ConvActivationMkldnnFusePass::FuseConvAct(Graph* graph, } if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) { - act_type = BOOST_GET_CONST(bool, activation->Op()->GetAttr("approximate")) - ? "gelu_tanh" - : "gelu_erf"; + act_type = + PADDLE_GET_CONST(bool, activation->Op()->GetAttr("approximate")) + ? "gelu_tanh" + : "gelu_erf"; conv_op->SetAttr("fuse_alpha", 0.0f); conv_op->SetAttr("fuse_beta", 0.0f); } diff --git a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc index d75874c6de7120..3efc0c9508bb56 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_activation_mkldnn_fuse_pass_tester.cc @@ -166,8 +166,8 @@ void MainTest(std::string activation) { if (node->IsOp() && node->Op()->Type() == "conv2d") { auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); - auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name")); if (op->GetAttrIfExists("fuse_activation") == activation) { ++conv_activation_count; } diff --git a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc index 6b4e50fc6c1637..aefdd63cfcc991 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_bias_mkldnn_fuse_pass_tester.cc @@ -142,9 +142,9 @@ void MainTest(bool convWithExistingBias) { if (node->IsOp() && node->Op()->Type() == "conv2d") { auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); // check if "conv" convolution is fused - auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); + auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name")); if (op_name == "conv") { auto input_names = op->InputNames(); ASSERT_TRUE(std::find(input_names.begin(), input_names.end(), "Bias") != diff --git a/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass_tester.cc index fee75534dfa316..8210bfeba4ca18 100644 --- a/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/conv_concat_relu_mkldnn_fuse_pass_tester.cc @@ -122,7 +122,7 @@ void MainTest(const ProgramDesc& prog, bool fuse_relu) { if (op->Type() == "conv2d") { 
ASSERT_TRUE(op->HasAttr("fuse_activation")); bool fuse_relu_attr = - (BOOST_GET_CONST(std::string, op->GetAttr("fuse_activation")) == + (PADDLE_GET_CONST(std::string, op->GetAttr("fuse_activation")) == "relu"); EXPECT_EQ(fuse_relu, fuse_relu_attr); } else if (op->Type() == "relu") { diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc index 7cfc3f3336d5fa..2c99322e565e91 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass.cc @@ -449,9 +449,9 @@ void CPUQuantizePass::QuantizeConv(Graph* graph, if (conv_op->Op()->GetAttrIfExists("fuse_activation") == "relu6") { float scale_out = - BOOST_GET_CONST(float, conv_op->Op()->GetAttr("Scale_out")); + PADDLE_GET_CONST(float, conv_op->Op()->GetAttr("Scale_out")); float threshold = - BOOST_GET_CONST(float, conv_op->Op()->GetAttr("fuse_alpha")); + PADDLE_GET_CONST(float, conv_op->Op()->GetAttr("fuse_alpha")); conv_op->Op()->SetAttr("fuse_alpha", scale_out * threshold); } diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc index ec7432e83f874c..fdeaeccdf94ed1 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_pass_tester.cc @@ -798,14 +798,15 @@ void MainTestMultiGru(int layers) { if (op->Type() == "multi_gru") { multi_gru_nodes_count++; - auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); - EXPECT_EQ(BOOST_GET_CONST(float, op->GetAttr("Scale_data")), scale) + auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name")); + EXPECT_EQ(PADDLE_GET_CONST(float, op->GetAttr("Scale_data")), scale) << "Scale_data for node '" + op_name + "'."; - EXPECT_EQ(BOOST_GET_CONST(float, op->GetAttr("Shift_data")), shift) + EXPECT_EQ(PADDLE_GET_CONST(float, op->GetAttr("Shift_data")), shift) << "Shift_data for node '" + op_name + "'."; EXPECT_EQ(op->Input("Scale_weights").size(), 2u * layers) << "Scale_weights for node '" + op_name + "'."; - EXPECT_EQ(BOOST_GET_CONST(bool, op->GetAttr("force_fp32_output")), true) + EXPECT_EQ(PADDLE_GET_CONST(bool, op->GetAttr("force_fp32_output")), + true) << "force_fp32_output for node '" + op_name + "'."; } else if (op->Type() == "quantize") { quantize_nodes_count++; diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc index 89ac249c20d936..7c23976d3c6e28 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass.cc @@ -179,9 +179,9 @@ void CPUQuantizeSquashPass::DequantQuantSquash( auto* next_op_desc = next_op->Op(); float dequant_scale = - BOOST_GET_CONST(float, dequant_op->Op()->GetAttr("Scale")); + PADDLE_GET_CONST(float, dequant_op->Op()->GetAttr("Scale")); float quant_scale = - BOOST_GET_CONST(float, quant_op->Op()->GetAttr("Scale")); + PADDLE_GET_CONST(float, quant_op->Op()->GetAttr("Scale")); float dequant_shift = dequant_op->Op()->GetAttrIfExists("Shift"); float quant_shift = quant_op->Op()->GetAttrIfExists("Shift"); PADDLE_ENFORCE_NE( @@ -275,7 +275,7 @@ void CPUQuantizeSquashPass::OpRequantSquash(Graph* graph) const { requant_in->Name())); float requant_scale_out = - BOOST_GET_CONST(float, requant_op->Op()->GetAttr("Scale_out")); + PADDLE_GET_CONST(float, requant_op->Op()->GetAttr("Scale_out")); any_op->Op()->SetAttr("Scale_out", requant_scale_out); 
any_op->Op()->SetOutput(any_op_output_name, std::vector({requant_out->Name()})); @@ -488,10 +488,10 @@ void CPUQuantizeSquashPass::DequantScaleSquash(Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(scale_out, scale_out, dequant_scale_pattern); if (dequant_out->outputs.size() == 1 && - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) { + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) { auto dequant_scale = dequant_op->Op()->GetAttrIfExists("Scale"); float scale_scale = - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")); PADDLE_ENFORCE_GT(dequant_scale, 0.0f, @@ -540,10 +540,10 @@ void CPUQuantizeSquashPass::ScaleQuantSquash(Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(quant_op, quant_op, scale_quant_pattern); if (quant_in->outputs.size() == 1 && - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) { + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("bias")) == 0.0f) { auto quant_scale = quant_op->Op()->GetAttrIfExists("Scale"); float scale_scale = - BOOST_GET_CONST(float, scale_op->Op()->GetAttr("scale")); + PADDLE_GET_CONST(float, scale_op->Op()->GetAttr("scale")); PADDLE_ENFORCE_GT( quant_scale, diff --git a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc index 6e9a591c9a0e6a..655cc95bf28a05 100644 --- a/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/cpu_quantize_squash_pass_tester.cc @@ -756,8 +756,8 @@ void EqualScaleTest(const ProgramDesc& prog, for (auto* node : graph->Nodes()) { if (node->IsOp() && - BOOST_GET_CONST(std::string, node->Op()->GetAttr("name")) == op_name) { - float op_scale = BOOST_GET_CONST(float, node->Op()->GetAttr(scale_name)); + PADDLE_GET_CONST(std::string, node->Op()->GetAttr("name")) == op_name) { + float op_scale = PADDLE_GET_CONST(float, node->Op()->GetAttr(scale_name)); EXPECT_EQ(op_scale, scale); } } @@ -775,10 +775,10 @@ void CheckRequantScalesTest(const ProgramDesc& prog, for (auto* node : graph->Nodes()) { if (node->IsOp() && node->Op()->Type() == "requantize") { float op_scale_in = - BOOST_GET_CONST(float, node->Op()->GetAttr("Scale_in")); + PADDLE_GET_CONST(float, node->Op()->GetAttr("Scale_in")); EXPECT_EQ(op_scale_in, scale_in); float op_scale_out = - BOOST_GET_CONST(float, node->Op()->GetAttr("Scale_out")); + PADDLE_GET_CONST(float, node->Op()->GetAttr("Scale_out")); EXPECT_EQ(op_scale_out, scale_out); } } diff --git a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc index 0e65c7d97440e8..f74e95fff10d89 100644 --- a/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/depthwise_conv_mkldnn_pass_tester.cc @@ -133,12 +133,12 @@ TEST(DepthwiseConvMKLDNNPass, basic) { if (node->IsOp()) { auto* op = node->Op(); if (op->Type() == "conv2d") { - if (BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))) + if (PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) after.mkldnn_conv_nodes++; else after.other_conv_nodes++; } else if (op->Type() == "depthwise_conv2d") { - if (BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))) + if (PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) after.mkldnn_depthwise_conv_nodes++; else after.other_depthwise_conv_nodes++; diff --git a/paddle/fluid/framework/ir/mkldnn/elt_act_mkldnn_fuse_pass.cc 
b/paddle/fluid/framework/ir/mkldnn/elt_act_mkldnn_fuse_pass.cc index b28b07924d8884..3059be8a9a9728 100644 --- a/paddle/fluid/framework/ir/mkldnn/elt_act_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/elt_act_mkldnn_fuse_pass.cc @@ -68,7 +68,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct( const std::string wo_elt_type = "The " + elt_type; // Workaround for PP error message checking. PADDLE_ENFORCE_EQ( - BOOST_GET_CONST(bool, elementwise_op->GetAttr("use_mkldnn")), + PADDLE_GET_CONST(bool, elementwise_op->GetAttr("use_mkldnn")), true, platform::errors::PreconditionNotMet( wo_elt_type + "+Act fusion may happen only when oneDNN library " @@ -85,7 +85,7 @@ void ElementwiseActivationOneDNNPass::FuseElementwiseAct( } if (act_type == "gelu" && activation_op->HasAttr("approximate") && - BOOST_GET_CONST(bool, activation_op->GetAttr("approximate"))) + PADDLE_GET_CONST(bool, activation_op->GetAttr("approximate"))) elementwise_op->SetAttr("fuse_activation", std::string("gelu_tanh")); else elementwise_op->SetAttr("fuse_activation", act_type); diff --git a/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc index e5031c83aac160..cdb0f70a56667d 100644 --- a/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass.cc @@ -56,14 +56,14 @@ void FuseFCActOneDNNPass::FuseFCAct(Graph *graph, if (fc_op->HasAttr("use_mkldnn")) { PADDLE_ENFORCE( - BOOST_GET_CONST(bool, fc_op->GetAttr("use_mkldnn")), + PADDLE_GET_CONST(bool, fc_op->GetAttr("use_mkldnn")), platform::errors::PreconditionNotMet( "The FC+Act fusion may happen only when oneDNN library " "is used.")); } if (act_type == "gelu" && act_op->HasAttr("approximate")) { - bool approximate = BOOST_GET_CONST(bool, act_op->GetAttr("approximate")); + bool approximate = PADDLE_GET_CONST(bool, act_op->GetAttr("approximate")); std::string type = approximate ? 
"_tanh" : "_erf"; fc_op->SetAttr("activation_type", act_type + type); } else { diff --git a/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc index faa9fd931e2dd6..38f253703ceeec 100644 --- a/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/fc_act_mkldnn_fuse_pass_tester.cc @@ -77,10 +77,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluTanh) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type = - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("gelu_tanh"), 0); } } @@ -112,10 +112,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluErf) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type = - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("gelu_erf"), 0); } } @@ -145,10 +145,10 @@ TEST(FuseFCActOneDNNPass, FuseWithGeluAuto) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type = - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("gelu"), 0); } } @@ -178,10 +178,10 @@ TEST(FuseFCActOneDNNPass, FuseWithTanh) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type = - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("tanh"), 0); } } @@ -212,10 +212,10 @@ TEST(FuseFCActOneDNNPass, FuseWithSigmoid) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type = - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("sigmoid"), 0); } } @@ -245,10 +245,10 @@ TEST(FuseFCActOneDNNPass, FuseWithMish) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type 
= - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("mish"), 0); } } @@ -279,10 +279,10 @@ TEST(FuseFCActOneDNNPass, FuseWithHardSwish) { if (node->IsOp() && node->Op()->Type() == "fc") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("activation_type")); auto act_type = - BOOST_GET_CONST(std::string, op->GetAttr("activation_type")); + PADDLE_GET_CONST(std::string, op->GetAttr("activation_type")); EXPECT_EQ(act_type.compare("hard_swish"), 0); } } diff --git a/paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.cc index 80f49c97e84658..d3f71e498bfe84 100644 --- a/paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/matmul_activation_mkldnn_fuse_pass.cc @@ -72,9 +72,10 @@ void MatmulActivationMkldnnFusePass::FuseMatmulAct( } if (act_type == "gelu" && activation->Op()->HasAttr("approximate")) { - act_type = BOOST_GET_CONST(bool, activation->Op()->GetAttr("approximate")) - ? "gelu_tanh" - : "gelu_erf"; + act_type = + PADDLE_GET_CONST(bool, activation->Op()->GetAttr("approximate")) + ? "gelu_tanh" + : "gelu_erf"; } matmul_op->SetAttr("fuse_activation", act_type); matmul_op->SetOutput("Out", {activation_out->Name()}); diff --git a/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc index b9f7c4eb23d564..09bf9c57c47288 100644 --- a/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/matmul_transpose_reshape_fuse_pass.cc @@ -113,9 +113,9 @@ void MatmulTransposeReshapeMKLDNNPass::ApplyImpl(ir::Graph *graph) const { GET_IR_NODE_FROM_SUBGRAPH(reshape_out, reshape_out, mtrp); GET_IR_NODE_FROM_SUBGRAPH(reshape_out_xshape, reshape_out_xshape, mtrp); auto reshape_shape = - BOOST_GET_CONST(std::vector, reshape_op->Op()->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape_op->Op()->GetAttr("shape")); auto transpose_axis = - BOOST_GET_CONST(std::vector, transpose_op->Op()->GetAttr("axis")); + PADDLE_GET_CONST(std::vector, transpose_op->Op()->GetAttr("axis")); auto reshape_out_size = reshape_shape.size(); auto transpose_out_size = transpose_axis.size(); diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc index b7dde247be0614..02a56e819c03f7 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_inplace_pass.cc @@ -56,7 +56,7 @@ void MKLDNNInPlacePass::ApplyImpl(ir::Graph* graph) const { GET_IR_NODE_FROM_SUBGRAPH(next_op_out, next_op_out, mkldnn_inplace); if ((current_op->Op()->HasAttr("use_mkldnn") == false) || - (BOOST_GET_CONST(bool, current_op->Op()->GetAttr("use_mkldnn")) == + (PADDLE_GET_CONST(bool, current_op->Op()->GetAttr("use_mkldnn")) == false)) { VLOG(3) << "do not perform mkl-dnn inplace: use_mkldnn missing or set to " "false"; diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_pass_util.h b/paddle/fluid/framework/ir/mkldnn/mkldnn_pass_util.h index 2721c16ee008b1..a714f236c46165 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_pass_util.h +++ 
b/paddle/fluid/framework/ir/mkldnn/mkldnn_pass_util.h @@ -67,7 +67,7 @@ static void GetInfoFromTheFirstOp( if (pos != std::string::npos) { std::string name = fake_name.substr(0, pos); auto scales_vector = - BOOST_GET_CONST(std::vector, op_desc->GetAttr(fake_name)); + PADDLE_GET_CONST(std::vector, op_desc->GetAttr(fake_name)); info_map->insert(std::make_pair(name, scales_vector)); op_desc->RemoveAttr(fake_name); } diff --git a/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass_tester.cc index b9c1954dc74e01..79b70e39aaf753 100644 --- a/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/mkldnn_placement_pass_tester.cc @@ -145,7 +145,7 @@ class PlacementPassTest { if (node->IsOp()) { auto* op = node->Op(); if (op->HasAttr("use_mkldnn") && - BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))) { + PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))) { ++use_mkldnn_true_count; } } diff --git a/paddle/fluid/framework/ir/mkldnn/multi_gru_seq_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/multi_gru_seq_fuse_pass.cc index bf959b7f5bb90a..d143f087918c3a 100644 --- a/paddle/fluid/framework/ir/mkldnn/multi_gru_seq_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/multi_gru_seq_fuse_pass.cc @@ -103,8 +103,8 @@ void MultiGruSeqFusePass::ApplyImpl(ir::Graph* graph) const { multi_gru_desc.SetAttr(attr.first, attr.second); } - auto layers = BOOST_GET_CONST(int, gru1->Op()->GetAttr("layers")) + - BOOST_GET_CONST(int, gru2->Op()->GetAttr("layers")); + auto layers = PADDLE_GET_CONST(int, gru1->Op()->GetAttr("layers")) + + PADDLE_GET_CONST(int, gru2->Op()->GetAttr("layers")); multi_gru_desc.SetAttr("layers", layers); auto multi_gru = diff --git a/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc b/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc index 42c54fcb36242f..306dae8b4e9cb3 100644 --- a/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/quant_dequant_mkldnn_pass.cc @@ -72,7 +72,7 @@ void QuantDequantMkldnnPass::CollectInfoFromFake( if (op_desc->HasAttr("max_range")) { const float max_range = - BOOST_GET_CONST(float, op_desc->GetAttr("max_range")); + PADDLE_GET_CONST(float, op_desc->GetAttr("max_range")); std::vector thresholds = {127 * 127 / max_range}; weight_thresholds->insert(std::make_pair(x_var_name, thresholds)); } else { @@ -111,7 +111,7 @@ void QuantDequantMkldnnPass::CollectInputScalesFromFake( fake_quantize_types.count(op_node->Name())) { auto* op_desc = op_node->Op(); const int bit_length = - BOOST_GET_CONST(int, op_desc->GetAttr("bit_length")); + PADDLE_GET_CONST(int, op_desc->GetAttr("bit_length")); PADDLE_ENFORCE_EQ(bit_length, 8, platform::errors::InvalidArgument( @@ -160,7 +160,7 @@ void QuantDequantMkldnnPass::CollectOutputScalesFromAttr( auto* op_desc = op_node->Op(); if (op_desc->HasAttr("out_threshold")) { const float attr_scale = - BOOST_GET_CONST(float, op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc->GetAttr("out_threshold")); if (attr_scale == 0.0) continue; float scale = 1.0 / attr_scale; std::vector scale_v = {scale}; diff --git a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc index 493e9e9dfbd50f..455e0621adb0f6 100644 --- a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc +++ 
b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass.cc @@ -101,11 +101,11 @@ void ShuffleChannelMKLDNNDetectPass::ApplyImpl(ir::Graph* graph) const { std::string output_name = reshape2_out->Name(); auto reshape1_shape = - BOOST_GET_CONST(std::vector, reshape1_desc->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape1_desc->GetAttr("shape")); auto reshape2_shape = - BOOST_GET_CONST(std::vector, reshape2_desc->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape2_desc->GetAttr("shape")); auto trans_axis = - BOOST_GET_CONST(std::vector, trans_desc->GetAttr("axis")); + PADDLE_GET_CONST(std::vector, trans_desc->GetAttr("axis")); auto* block1 = reshape1_desc->Block(); auto* block2 = reshape2_desc->Block(); if (block1 && block2) { diff --git a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass_tester.cc index 04b67f3f0d2658..95fba789ba7f69 100644 --- a/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/shuffle_channel_mkldnn_detect_pass_tester.cc @@ -69,7 +69,7 @@ void MainTest() { if (node->IsOp() && node->Op()->Type() == "shuffle_channel") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); } } } diff --git a/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass.cc b/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass.cc index 41e70e529bf73d..77df45c8e07858 100644 --- a/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass.cc +++ b/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass.cc @@ -62,7 +62,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation( if (softplus_op->HasAttr("use_mkldnn")) { PADDLE_ENFORCE_EQ( - BOOST_GET_CONST(bool, softplus_op->GetAttr("use_mkldnn")), + PADDLE_GET_CONST(bool, softplus_op->GetAttr("use_mkldnn")), true, platform::errors::PreconditionNotMet("The softplus + activation " "fusion may happen only when " @@ -78,7 +78,7 @@ void SoftplusActivationOneDNNPass::FuseSoftplusActivation( } if (act_type == "gelu" && activation_op->HasAttr("approximate") && - BOOST_GET_CONST(bool, activation_op->GetAttr("approximate"))) + PADDLE_GET_CONST(bool, activation_op->GetAttr("approximate"))) softplus_op->SetAttr("fuse_activation", std::string("gelu_tanh")); else softplus_op->SetAttr("fuse_activation", act_type); diff --git a/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass_tester.cc b/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass_tester.cc index afe3d75fd2126f..02e53a9b8a0669 100644 --- a/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass_tester.cc +++ b/paddle/fluid/framework/ir/mkldnn/softplus_activation_mkldnn_fuse_pass_tester.cc @@ -49,40 +49,44 @@ void MainTest(const std::string& activation_type) { if (node->IsOp() && node->Op()->Type() == "softplus") { const auto* op = node->Op(); ASSERT_TRUE(op->HasAttr("use_mkldnn")); - EXPECT_TRUE(BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn"))); + EXPECT_TRUE(PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn"))); ASSERT_TRUE(op->HasAttr("fuse_activation")); auto activation_type = - BOOST_GET_CONST(std::string, op->GetAttr("fuse_activation")); + PADDLE_GET_CONST(std::string, op->GetAttr("fuse_activation")); EXPECT_EQ(activation_type.compare(activation_type), 0); } } 
} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithTanh){MainTest("tanh")} +// clang-format off +TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithTanh) {MainTest("tanh")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithRelu){MainTest("relu")} +TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithRelu) {MainTest("relu")} TEST(FuseSoftplusActivationOneDNNPass, - FuseSoftplusWithLeakyRelu){MainTest("leaky_relu")} + FuseSoftplusWithLeakyRelu) {MainTest("leaky_relu")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSwish){MainTest("swish")} +TEST(FuseSoftplusActivationOneDNNPass, + FuseSoftplusWithSwish) {MainTest("swish")} TEST(FuseSoftplusActivationOneDNNPass, - FuseSoftplusWithHardswish){MainTest("hardswish")} + FuseSoftplusWithHardswish) {MainTest("hardswish")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSqrt){MainTest("sqrt")} +TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSqrt) {MainTest("sqrt")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithAbs){MainTest("abs")} +TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithAbs) {MainTest("abs")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithClip){MainTest("clip")} +TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithClip) {MainTest("clip")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithGelu){MainTest("gelu")} +TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithGelu) {MainTest("gelu")} -TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithRelu6){MainTest("relu6")} +TEST(FuseSoftplusActivationOneDNNPass, + FuseSoftplusWithRelu6) {MainTest("relu6")} TEST(FuseSoftplusActivationOneDNNPass, FuseSoftplusWithSigmoid) { MainTest("sigmoid") } +// clang-format on } // namespace ir } // namespace framework diff --git a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc index eb7b68f04919f1..7b203125681c54 100644 --- a/paddle/fluid/framework/ir/multi_batch_merge_pass.cc +++ b/paddle/fluid/framework/ir/multi_batch_merge_pass.cc @@ -87,7 +87,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const { node->Op(), platform::errors::InvalidArgument("Node(%s) must hold op description.", node->Name())); - int op_role = BOOST_GET_CONST( + int op_role = PADDLE_GET_CONST( int, node->Op()->GetAttr( framework::OpProtoAndCheckerMaker::OpRoleAttrName())); @@ -102,7 +102,7 @@ void BatchMergePass::ApplyImpl(ir::Graph* graph) const { auto op_role_var = node->Op()->GetNullableAttr( OpProtoAndCheckerMaker::OpRoleVarAttrName()); auto op_role_vars = - BOOST_GET_CONST(std::vector, op_role_var); + PADDLE_GET_CONST(std::vector, op_role_var); for (size_t i = 0; i < op_role_vars.size(); i += 2) { grad_names.insert(op_role_vars[i + 1]); gradname2paramname[op_role_vars[i + 1]] = op_role_vars[i]; diff --git a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc index 1305019cfd9b29..f1d13c23b1252f 100644 --- a/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc +++ b/paddle/fluid/framework/ir/multi_devices_graph_pass/multi_devices_graph_pass.cc @@ -53,7 +53,7 @@ typedef std::vector GraphOps; const char kGraphOps[] = "ops"; bool OpHaveRole(const ir::Node &node, const framework::OpRole &role) { - return BOOST_GET_CONST( + return PADDLE_GET_CONST( int, node.Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) == static_cast(role); @@ -549,7 +549,7 @@ void 
MultiDevSSAGraphBuilderBase::CreateAllReduceOp(ir::Graph *result, "Please compile PaddlePaddle WITH_DGC first.")); #endif } else if (is_grad_merge) { - grad_merge_cond_name = BOOST_GET_CONST( + grad_merge_cond_name = PADDLE_GET_CONST( std::string, node->Op()->GetAttr(GRAD_MERGE_COND_NAME)); VLOG(10) << "og=" << og << " use grad_merge_allreduce"; #if defined(PADDLE_WITH_NCCL) || defined(PADDLE_WITH_RCCL) @@ -766,7 +766,7 @@ details::VarHandle *MultiDevSSAGraphBuilderBase::CreateReduceOp( bool MultiDevSSAGraphBuilderBase::IsScaleLossOp(ir::Node *node) const { return !loss_var_name_.empty() && node->Op() && - BOOST_GET_CONST( + PADDLE_GET_CONST( int, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) == (static_cast(OpRole::kBackward) | @@ -830,7 +830,7 @@ int BalanceVarSSAGraphBuilder::GetOpDeviceID(ir::Node *node) const { if (!OpHaveRole(*node, framework::OpRole::kOptimize)) { return -1; } - auto param_grad = BOOST_GET_CONST( + auto param_grad = PADDLE_GET_CONST( std::vector, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); @@ -951,7 +951,7 @@ int ReduceSSAGraphBuilder::GetOpDeviceID( return -1; } - auto param_grad = BOOST_GET_CONST( + auto param_grad = PADDLE_GET_CONST( std::vector, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); @@ -1007,7 +1007,7 @@ std::vector ReduceSSAGraphBuilder::SortForReduceMode( // gradients. sorted_ops.emplace_back(node); bool is_bk_op = static_cast( - BOOST_GET_CONST( + PADDLE_GET_CONST( int, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())) & static_cast(OpRole::kBackward)); @@ -1062,9 +1062,9 @@ bool DistSSAGraphBuilder::DealWithSpecialOp(ir::Graph *result, node->Name())); if (node->Op()->Type() == "recv") { auto recv_vars_attr = - BOOST_GET_CONST(std::vector, - node->Op()->GetNullableAttr( - OpProtoAndCheckerMaker::OpRoleVarAttrName())); + PADDLE_GET_CONST(std::vector, + node->Op()->GetNullableAttr( + OpProtoAndCheckerMaker::OpRoleVarAttrName())); PADDLE_ENFORCE_EQ( recv_vars_attr.size(), 2UL, @@ -1138,7 +1138,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { for (ir::Node *n : node->inputs) { input_var_names.push_back(n->Name()); } - auto send_param_grad = BOOST_GET_CONST( + auto send_param_grad = PADDLE_GET_CONST( std::vector, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); PADDLE_ENFORCE_EQ( @@ -1162,7 +1162,7 @@ int DistSSAGraphBuilder::CreateRPCOp(ir::Graph *result, ir::Node *node) const { for (ir::Node *n : node->outputs) { output_var_names.push_back(n->Name()); } - auto recv_param_grad = BOOST_GET_CONST( + auto recv_param_grad = PADDLE_GET_CONST( std::vector, node->Op()->GetAttr(OpProtoAndCheckerMaker::OpRoleVarAttrName())); if (recv_param_grad.size() == 2U) { diff --git a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc index 9bcecddc7cfbd9..c7274e8ce36dae 100644 --- a/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc +++ b/paddle/fluid/framework/ir/multihead_matmul_fuse_pass.cc @@ -66,10 +66,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { Node* reshape2_qkv_out, Node* scale, Node* scale_out) { - auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); - // auto scale_bias = BOOST_GET_CONST(float, scale->Op()->GetAttr("bias")); + auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale")); + // auto scale_bias = PADDLE_GET_CONST(float, scale->Op()->GetAttr("bias")); // bool after_scale = - // 
BOOST_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale")); + // PADDLE_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale")); // create multihead OpDesc multihead_op_desc(mul0->Op()->Block()); @@ -89,7 +89,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { auto reshape_desc = reshape2->Op(); int head_number = - BOOST_GET_CONST(std::vector, reshape_desc->GetAttr("shape")).at(2); + PADDLE_GET_CONST(std::vector, reshape_desc->GetAttr("shape")) + .at(2); ReplaceOutputVar(mul0, mul0_out, q_var_node); ReplaceOutputVar(mul1, mul1_out, k_var_node); @@ -803,7 +804,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, Node* eltadd2, Node* matmul_qk, Node* reshape2_qkv) { - auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); + auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale")); // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H) @@ -890,7 +891,8 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, auto reshape_desc = reshape2->Op(); int head_number = - BOOST_GET_CONST(std::vector, reshape_desc->GetAttr("shape")).at(2); + PADDLE_GET_CONST(std::vector, reshape_desc->GetAttr("shape")) + .at(2); OpDesc multihead_op_desc(mul0->Op()->Block()); multihead_op_desc.SetType("multihead_matmul"); @@ -916,11 +918,11 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, auto* add2_op_desc = eltadd2->Op(); if (add0_op_desc->HasAttr("out_threshold")) { auto out_scale0 = - BOOST_GET_CONST(float, add0_op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, add0_op_desc->GetAttr("out_threshold")); auto out_scale1 = - BOOST_GET_CONST(float, add1_op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, add1_op_desc->GetAttr("out_threshold")); auto out_scale2 = - BOOST_GET_CONST(float, add2_op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, add2_op_desc->GetAttr("out_threshold")); auto out_scale_max = std::max(out_scale0, out_scale1); out_scale_max = std::max(out_scale_max, out_scale2); multihead_op_desc.SetAttr("fc_out_threshold", out_scale_max); @@ -931,7 +933,7 @@ int MultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, if (matmul_qk_op_desc->HasAttr("Input_scale")) { multihead_op_desc.SetAttr("qkv2context_plugin_int8", true); if (softmax_qk_op_desc->HasAttr("out_threshold")) { - auto qkv_plugin_scale = BOOST_GET_CONST( + auto qkv_plugin_scale = PADDLE_GET_CONST( float, softmax_qk_op_desc->GetAttr("out_threshold")); multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale); } @@ -1287,7 +1289,8 @@ int MultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, Node* reshape2, Node* reshape2_qkv_out, Node* matmul_qk) { - auto scale_attr = BOOST_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha")); + auto scale_attr = + PADDLE_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha")); // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H) @@ -1374,7 +1377,8 @@ int MultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, auto reshape_desc = reshape2->Op(); int head_number = - BOOST_GET_CONST(std::vector, reshape_desc->GetAttr("shape")).at(2); + PADDLE_GET_CONST(std::vector, reshape_desc->GetAttr("shape")) + .at(2); OpDesc multihead_op_desc(mul0->Op()->Block()); multihead_op_desc.SetType("multihead_matmul"); diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc index 7bf041c3a720d6..a9b17634531b0b 100644 --- 
a/paddle/fluid/framework/ir/op_compat_sensible_pass.cc +++ b/paddle/fluid/framework/ir/op_compat_sensible_pass.cc @@ -37,7 +37,7 @@ std::unordered_set global_extra_attrs = { "use_cudnn", "name", "with_quant_attr"}; -} +} // namespace namespace paddle { namespace framework { @@ -45,14 +45,14 @@ namespace ir { AttrCompat& AttrCompat::IsStringEQ(const std::string& value) { conditions_.emplace_back([value](const Attribute& attr) -> bool { - return value == BOOST_GET_CONST(std::string, attr); + return value == PADDLE_GET_CONST(std::string, attr); }); return *this; } AttrCompat& AttrCompat::IsStringIn(const std::set& candidates) { conditions_.emplace_back([candidates](const Attribute& attr) -> bool { - std::string value = BOOST_GET_CONST(std::string, attr); + std::string value = PADDLE_GET_CONST(std::string, attr); for (auto& str : candidates) { if (str == value) { return true; @@ -66,7 +66,7 @@ AttrCompat& AttrCompat::IsStringIn(const std::set& candidates) { AttrCompat& AttrCompat::IsStringMatch( const std::function& func) { conditions_.emplace_back([func](const Attribute& attr) -> bool { - std::string value = BOOST_GET_CONST(std::string, attr); + std::string value = PADDLE_GET_CONST(std::string, attr); return func(value); }); return *this; @@ -74,7 +74,7 @@ AttrCompat& AttrCompat::IsStringMatch( AttrCompat& AttrCompat::IsIntIn(const std::set& candidates) { conditions_.emplace_back([candidates](const Attribute& attr) -> bool { - int value = BOOST_GET_CONST(int, attr); + int value = PADDLE_GET_CONST(int, attr); return candidates.find(value) != candidates.end(); }); return *this; @@ -134,7 +134,7 @@ AttrCompat& AttrCompat::IsOptional() { AttrCompat& AttrCompat::IsBoolEQ(bool v) { conditions_.emplace_back([v](const Attribute& attr) -> bool { - bool value = BOOST_GET_CONST(bool, attr); + bool value = PADDLE_GET_CONST(bool, attr); return value == v; }); return *this; diff --git a/paddle/fluid/framework/ir/op_compat_sensible_pass.h b/paddle/fluid/framework/ir/op_compat_sensible_pass.h index 393a2fb9392d5f..ea4e09c6d09762 100644 --- a/paddle/fluid/framework/ir/op_compat_sensible_pass.h +++ b/paddle/fluid/framework/ir/op_compat_sensible_pass.h @@ -224,7 +224,7 @@ AttrCompat& AttrCompat::IsType() { template AttrCompat& AttrCompat::IsNumGT(T v) { conditions_.emplace_back([v](const Attribute& attr) -> bool { - T value = BOOST_GET_CONST(T, attr); + T value = PADDLE_GET_CONST(T, attr); return value > v; }); return *this; @@ -233,7 +233,7 @@ AttrCompat& AttrCompat::IsNumGT(T v) { template AttrCompat& AttrCompat::IsNumGE(T v) { conditions_.emplace_back([v](const Attribute& attr) -> bool { - T value = BOOST_GET_CONST(T, attr); + T value = PADDLE_GET_CONST(T, attr); return value >= v; }); return *this; @@ -242,7 +242,7 @@ AttrCompat& AttrCompat::IsNumGE(T v) { template AttrCompat& AttrCompat::IsNumLT(T v) { conditions_.emplace_back([v](const Attribute& attr) -> bool { - T value = BOOST_GET_CONST(T, attr); + T value = PADDLE_GET_CONST(T, attr); return value < v; }); return *this; @@ -251,7 +251,7 @@ AttrCompat& AttrCompat::IsNumLT(T v) { template AttrCompat& AttrCompat::IsNumLE(T v) { conditions_.emplace_back([v](const Attribute& attr) -> bool { - T value = BOOST_GET_CONST(T, attr); + T value = PADDLE_GET_CONST(T, attr); return value <= v; }); return *this; @@ -260,7 +260,7 @@ AttrCompat& AttrCompat::IsNumLE(T v) { template AttrCompat& AttrCompat::IsNumEQ(T v) { conditions_.emplace_back([v](const Attribute& attr) -> bool { - T value = BOOST_GET_CONST(T, attr); + T value = PADDLE_GET_CONST(T, attr); return value 
== v; }); return *this; @@ -269,7 +269,7 @@ AttrCompat& AttrCompat::IsNumEQ(T v) { template AttrCompat& AttrCompat::IsNumMatch(bool (*func)(T)) { conditions_.emplace_back([func](const Attribute& attr) -> bool { - T value = BOOST_GET_CONST(T, attr); + T value = PADDLE_GET_CONST(T, attr); return func(value); }); return *this; diff --git a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc index 0099163ef78d73..1ff738aeedd521 100644 --- a/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc +++ b/paddle/fluid/framework/ir/quant_conv2d_dequant_fuse_pass.cc @@ -350,7 +350,7 @@ void QuantDequantFusePass::DeleteQuant(ir::Graph* graph, Node* quant = subgraph.at(pattern.GetPDNode("quant_node")); Node* output_scale = subgraph.at(pattern.GetPDNode("output_scale_node")); Node* output_act = subgraph.at(pattern.GetPDNode("output_act_node")); - int bit_length = BOOST_GET_CONST(int, quant->Op()->GetAttr("bit_length")); + int bit_length = PADDLE_GET_CONST(int, quant->Op()->GetAttr("bit_length")); // Get input scale from tensor std::string input_scale_var_name = quant->Op()->Input("InScale").front(); @@ -464,13 +464,13 @@ void QuantDequantFusePass::FuseDequant(ir::Graph* graph, std::unordered_set nodes2rm = {}; int bit_length = - BOOST_GET_CONST(int, quantized_op_node->Op()->GetAttr("bit_length")); + PADDLE_GET_CONST(int, quantized_op_node->Op()->GetAttr("bit_length")); int range = ((1 << (bit_length - 1)) - 1); std::vector weight_scale; int quant_axis = 0; if (dequant_op_node->Op()->HasAttr("quant_axis")) { quant_axis = - BOOST_GET_CONST(int, dequant_op_node->Op()->GetAttr("quant_axis")); + PADDLE_GET_CONST(int, dequant_op_node->Op()->GetAttr("quant_axis")); } // Get weight scale if (dequant_type == "fake_channel_wise_dequantize_max_abs") { @@ -497,7 +497,7 @@ void QuantDequantFusePass::FuseDequant(ir::Graph* graph, nodes2rm.insert(dequant_channel_scale_node); } else { float max_range = - BOOST_GET_CONST(float, dequant_op_node->Op()->GetAttr("max_range")); + PADDLE_GET_CONST(float, dequant_op_node->Op()->GetAttr("max_range")); weight_scale.push_back((range * range) / max_range / range); } diff --git a/paddle/fluid/framework/ir/remove_padding_recover_padding_pass.cc b/paddle/fluid/framework/ir/remove_padding_recover_padding_pass.cc index d0023798bb4d43..cb519a28ea38bd 100644 --- a/paddle/fluid/framework/ir/remove_padding_recover_padding_pass.cc +++ b/paddle/fluid/framework/ir/remove_padding_recover_padding_pass.cc @@ -413,7 +413,7 @@ void RemovePaddingRecoverPaddingPass::ApplyImpl(ir::Graph* graph) const { check_flag = false; } - if (BOOST_GET_CONST(int, fc_op->Op()->GetAttr("in_num_col_dims")) != 2) { + if (PADDLE_GET_CONST(int, fc_op->Op()->GetAttr("in_num_col_dims")) != 2) { check_flag = false; } if (!check_flag) { diff --git a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc index 31dfafc6d239ba..6d2241d280be3e 100644 --- a/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc +++ b/paddle/fluid/framework/ir/repeated_fc_relu_fuse_pass.cc @@ -70,7 +70,7 @@ static bool IsOutputOfFC(Node* n) { static bool IsFCWithAct(Node* n, const std::string& act_type = "relu") { if (n && n->IsOp() && n->Op() && n->Op()->Type() == "fc" && n->inputs.size() == 3U && n->outputs.size() == 1U) { - return BOOST_GET_CONST(std::string, n->Op()->GetAttr("activation_type")) == + return PADDLE_GET_CONST(std::string, n->Op()->GetAttr("activation_type")) == act_type; } return false; @@ -81,7 
+81,7 @@ static bool IsFCWithPaddingWeights(Node* n) { if (n && n->IsOp() && n->Op() && n->Op()->Type() == "fc" && n->inputs.size() == 3U && n->outputs.size() == 1U) { if (n->Op()->HasAttr("padding_weights")) { - res = BOOST_GET_CONST(bool, n->Op()->GetAttr("padding_weights")); + res = PADDLE_GET_CONST(bool, n->Op()->GetAttr("padding_weights")); } } return res; diff --git a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc index 9f0ec3bec67ca5..be1a4de55d574e 100644 --- a/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc +++ b/paddle/fluid/framework/ir/seqpool_concat_fuse_pass.cc @@ -49,7 +49,7 @@ PDNode* BuildSeqPoolConcatPattern(PDPattern* pattern, bool this_is_seqpool_op = x && x->IsOp() && x->Op()->Type() == "sequence_pool" && x->Op()->HasAttr("pooltype") && - BOOST_GET_CONST(std::string, x->Op()->GetAttr("pooltype")) == type && + PADDLE_GET_CONST(std::string, x->Op()->GetAttr("pooltype")) == type && x->outputs.size() == 2; // seqpool should only have 2 outputs bool satisfied_all = this_is_seqpool_op; if (this_is_seqpool_op) { diff --git a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc index 9a514c11847016..fb98c3b1216c6b 100644 --- a/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc +++ b/paddle/fluid/framework/ir/shuffle_channel_detect_pass.cc @@ -101,11 +101,11 @@ void ShuffleChannelDetectPass::ApplyImpl(ir::Graph* graph) const { std::string output_name = reshape2_out->Name(); auto reshape1_shape = - BOOST_GET_CONST(std::vector, reshape1_desc->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape1_desc->GetAttr("shape")); auto reshape2_shape = - BOOST_GET_CONST(std::vector, reshape2_desc->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape2_desc->GetAttr("shape")); auto trans_axis = - BOOST_GET_CONST(std::vector, trans_desc->GetAttr("axis")); + PADDLE_GET_CONST(std::vector, trans_desc->GetAttr("axis")); auto* block1 = reshape1_desc->Block(); auto* block2 = reshape2_desc->Block(); if (block1 && block2) { diff --git a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc index b1480bac8b811e..217cf85bd01d6c 100644 --- a/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc +++ b/paddle/fluid/framework/ir/simplify_with_basic_ops_pass.cc @@ -79,10 +79,10 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout( // dropout_op is INT. if (dropout_op_desc->HasAttr("is_test")) { if (dropout_op_desc->GetAttrType("is_test") == proto::AttrType::BOOLEAN) { - is_test = BOOST_GET_CONST(bool, dropout_op_desc->GetAttr("is_test")); + is_test = PADDLE_GET_CONST(bool, dropout_op_desc->GetAttr("is_test")); } else if (dropout_op_desc->GetAttrType("is_test") == proto::AttrType::INT) { - is_test = BOOST_GET_CONST(int, dropout_op_desc->GetAttr("is_test")) == 0 + is_test = PADDLE_GET_CONST(int, dropout_op_desc->GetAttr("is_test")) == 0 ? 
false : true; } @@ -100,14 +100,14 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout( if (dropout_op_desc->HasAttr("dropout_implementation")) { if (dropout_op_desc->GetAttrType("dropout_implementation") == proto::AttrType::BOOLEAN) { - upscale_in_train = BOOST_GET_CONST( + upscale_in_train = PADDLE_GET_CONST( bool, dropout_op_desc->GetAttr("dropout_implementation")); } else if (dropout_op_desc->GetAttrType("dropout_implementation") == proto::AttrType::STRING) { upscale_in_train = - BOOST_GET_CONST(std::string, - dropout_op_desc->GetAttr("dropout_implementation")) == - "upscale_in_train"; + PADDLE_GET_CONST(std::string, + dropout_op_desc->GetAttr( + "dropout_implementation")) == "upscale_in_train"; } } @@ -156,8 +156,8 @@ bool SimplifyWithBasicOpsPass::SimplifyDropout( // | // \|/ // dropout_x -> scale_op -> dropout_out -> next_op -> next_out - float scale = - 1.0f - BOOST_GET_CONST(float, dropout_op_desc->GetAttr("dropout_prob")); + float scale = 1.0f - PADDLE_GET_CONST( + float, dropout_op_desc->GetAttr("dropout_prob")); framework::OpDesc new_op_desc(dropout_op_desc->Block()); new_op_desc.SetType("scale"); diff --git a/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc b/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc index b3b6787140c2bb..4a443dc70860c4 100644 --- a/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc +++ b/paddle/fluid/framework/ir/sync_batch_norm_pass_tester.cc @@ -84,7 +84,7 @@ TEST(IsTestPass, basic) { for (auto* node : graph->Nodes()) { if (node->IsOp()) { auto* op = node->Op(); - auto op_name = BOOST_GET_CONST(std::string, op->GetAttr("name")); + auto op_name = PADDLE_GET_CONST(std::string, op->GetAttr("name")); if (op_name == "bn") { ASSERT_EQ(op->Type(), "sync_batch_norm"); } diff --git a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc index e7c7bec50a0622..64f2801bf0220e 100644 --- a/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc +++ b/paddle/fluid/framework/ir/transpose_flatten_concat_fuse_pass.cc @@ -117,18 +117,18 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse( input_nodes[i]->name())); if (i == 0) { - trans_axis0 = BOOST_GET_CONST( + trans_axis0 = PADDLE_GET_CONST( std::vector, subgraph.at(pattern.GetPDNode("transpose" + std::to_string(0))) ->Op() ->GetAttr("axis")); - flatten_axis0 = BOOST_GET_CONST( + flatten_axis0 = PADDLE_GET_CONST( int, subgraph.at(pattern.GetPDNode("flatten" + std::to_string(0))) ->Op() ->GetAttr("axis")); } else { - std::vector trans_axis = BOOST_GET_CONST( + std::vector trans_axis = PADDLE_GET_CONST( std::vector, subgraph.at(pattern.GetPDNode("transpose" + std::to_string(i))) ->Op() @@ -136,7 +136,7 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse( // All axis of transpose should be the same if (trans_axis0 != trans_axis) return; - int flatten_axis = BOOST_GET_CONST( + int flatten_axis = PADDLE_GET_CONST( int, subgraph.at(pattern.GetPDNode("flatten" + std::to_string(0))) ->Op() @@ -159,11 +159,11 @@ void TransposeFlattenConcatFusePass::RunTransposeFlattenConcatFuse( Node *concat_op = subgraph.at(pattern.GetPDNode("concat")); Node *concat_out = subgraph.at(pattern.GetPDNode("concat_out")); std::vector input_names; - std::vector trans_axis = BOOST_GET_CONST( + std::vector trans_axis = PADDLE_GET_CONST( std::vector, nodes[kTransOffset]->Op()->GetAttr("axis")); int flatten_axis = - BOOST_GET_CONST(int, nodes[kFlattenOffset]->Op()->GetAttr("axis")); - int concat_axis = 
BOOST_GET_CONST(int, concat_op->Op()->GetAttr("axis")); + PADDLE_GET_CONST(int, nodes[kFlattenOffset]->Op()->GetAttr("axis")); + int concat_axis = PADDLE_GET_CONST(int, concat_op->Op()->GetAttr("axis")); std::string output_name = concat_out->Name(); for (int i = 0; i < times; i++) { diff --git a/paddle/fluid/framework/ir/trt_map_matmul_to_mul_pass.cc b/paddle/fluid/framework/ir/trt_map_matmul_to_mul_pass.cc index 2f2ef1a4102437..dceacef0010ef5 100644 --- a/paddle/fluid/framework/ir/trt_map_matmul_to_mul_pass.cc +++ b/paddle/fluid/framework/ir/trt_map_matmul_to_mul_pass.cc @@ -272,8 +272,8 @@ void TrtMapMatmul2MulPass::ApplyImpl(ir::Graph* graph) const { bool flag = true; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); flag = flag && !transpose_X && std::abs(alpha - 1.0) < 1e-5; std::vector x_shape = matmul_in_x->Var()->GetShape(); @@ -359,7 +359,7 @@ void TrtMapMatmulV2ToMulPass::ApplyImpl(ir::Graph* graph) const { bool flag = true; bool trans_x = - BOOST_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x")); + PADDLE_GET_CONST(bool, matmul_v2_op->Op()->GetAttr("trans_x")); flag = flag && !trans_x; std::vector x_shape = matmul_v2_in_x->Var()->GetShape(); @@ -531,17 +531,17 @@ void TrtSqueeze2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { size_t squeeze2_in_x_rank = (squeeze2_in_x->Var()->GetShape()).size(); std::vector squeeze2_op_axes = - BOOST_GET_CONST(std::vector, squeeze2_op->Op()->GetAttr("axes")); + PADDLE_GET_CONST(std::vector, squeeze2_op->Op()->GetAttr("axes")); flag = flag && squeeze2_in_x_rank == 4 && squeeze2_op_axes == std::vector{2, 3} && (matmul_in_x->outputs).size() == 1 && matmul_in_y->Var()->Persistable(); bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); flag = flag && !transpose_X && !transpose_Y && @@ -690,16 +690,16 @@ void TrtReshape2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { auto reshape2_in_x_shape = reshape2_in_x->Var()->GetShape(); size_t reshape2_in_x_rank = reshape2_in_x_shape.size(); std::vector reshape2_op_shape = - BOOST_GET_CONST(std::vector, reshape2_op->Op()->GetAttr("shape")); + PADDLE_GET_CONST(std::vector, reshape2_op->Op()->GetAttr("shape")); flag = flag && reshape2_in_nums == 1 && reshape2_in_x_rank == 4 && reshape2_in_x_shape[2] == 1 && reshape2_in_x_shape[3] == 1 && reshape2_op_shape.size() == 2 && (matmul_in_x->outputs).size() == 1; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, 
matmul_op->Op()->GetAttr("alpha")); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); flag = flag && !transpose_X && !transpose_Y && @@ -786,7 +786,7 @@ void TrtFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { auto flatten2_in_x_shape = flatten2_in_x->Var()->GetShape(); size_t flatten2_in_x_rank = flatten2_in_x_shape.size(); int flatten2_axis = - BOOST_GET_CONST(int, flatten2_op->Op()->GetAttr("axis")); + PADDLE_GET_CONST(int, flatten2_op->Op()->GetAttr("axis")); // only convert matmul to mul when the flatten2 has a single input // and the rank of input is 4 and the size of the output of matmul // is 1. @@ -795,10 +795,10 @@ void TrtFlatten2MatmulFusePass::ApplyImpl(ir::Graph* graph) const { (matmul_in_x->outputs).size() == 1; bool transpose_X = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_X")); bool transpose_Y = - BOOST_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); - float alpha = BOOST_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); + PADDLE_GET_CONST(bool, matmul_op->Op()->GetAttr("transpose_Y")); + float alpha = PADDLE_GET_CONST(float, matmul_op->Op()->GetAttr("alpha")); size_t matmul_in_x_rank = (matmul_in_x->Var()->GetShape()).size(); size_t matmul_in_y_rank = (matmul_in_y->Var()->GetShape()).size(); pattern_found = pattern_found && !transpose_X && !transpose_Y && diff --git a/paddle/fluid/framework/ir/trt_multihead_matmul_fuse_pass.cc b/paddle/fluid/framework/ir/trt_multihead_matmul_fuse_pass.cc index eb5b734291da51..63f2d86b42de24 100644 --- a/paddle/fluid/framework/ir/trt_multihead_matmul_fuse_pass.cc +++ b/paddle/fluid/framework/ir/trt_multihead_matmul_fuse_pass.cc @@ -66,10 +66,10 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { Node* reshape2_qkv_out, Node* scale, Node* scale_out) { - auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); - // auto scale_bias = BOOST_GET_CONST(float, scale->Op()->GetAttr("bias")); + auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale")); + // auto scale_bias = PADDLE_GET_CONST(float, scale->Op()->GetAttr("bias")); // bool after_scale = - // BOOST_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale")); + // PADDLE_GET_CONST(bool, scale->Op()->GetAttr("bias_after_scale")); // create multihead OpDesc multihead_op_desc(mul0->Op()->Block()); @@ -89,7 +89,8 @@ static int BuildFusion(Graph* graph, const std::string& name_scope) { auto reshape_desc = reshape2->Op(); int head_number = - BOOST_GET_CONST(std::vector, reshape_desc->GetAttr("shape")).at(2); + PADDLE_GET_CONST(std::vector, reshape_desc->GetAttr("shape")) + .at(2); ReplaceOutputVar(mul0, mul0_out, q_var_node); ReplaceOutputVar(mul1, mul1_out, k_var_node); @@ -822,7 +823,7 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, Node* eltadd2, Node* matmul_qk, Node* reshape2_qkv) { - auto scale_attr = BOOST_GET_CONST(float, scale->Op()->GetAttr("scale")); + auto scale_attr = PADDLE_GET_CONST(float, scale->Op()->GetAttr("scale")); // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H) @@ -909,7 +910,8 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, auto reshape_desc = reshape2->Op(); int head_number = - BOOST_GET_CONST(std::vector, reshape_desc->GetAttr("shape")).at(2); + PADDLE_GET_CONST(std::vector, reshape_desc->GetAttr("shape")) + .at(2); OpDesc 
multihead_op_desc(mul0->Op()->Block()); multihead_op_desc.SetType("multihead_matmul"); @@ -935,11 +937,11 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, auto* add2_op_desc = eltadd2->Op(); if (add0_op_desc->HasAttr("out_threshold")) { auto out_scale0 = - BOOST_GET_CONST(float, add0_op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, add0_op_desc->GetAttr("out_threshold")); auto out_scale1 = - BOOST_GET_CONST(float, add1_op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, add1_op_desc->GetAttr("out_threshold")); auto out_scale2 = - BOOST_GET_CONST(float, add2_op_desc->GetAttr("out_threshold")); + PADDLE_GET_CONST(float, add2_op_desc->GetAttr("out_threshold")); auto out_scale_max = std::max(out_scale0, out_scale1); out_scale_max = std::max(out_scale_max, out_scale2); multihead_op_desc.SetAttr("fc_out_threshold", out_scale_max); @@ -950,7 +952,7 @@ int TrtMultiHeadMatmulV2FusePass::BuildFusionV2(Graph* graph, if (matmul_qk_op_desc->HasAttr("Input_scale")) { multihead_op_desc.SetAttr("qkv2context_plugin_int8", true); if (softmax_qk_op_desc->HasAttr("out_threshold")) { - auto qkv_plugin_scale = BOOST_GET_CONST( + auto qkv_plugin_scale = PADDLE_GET_CONST( float, softmax_qk_op_desc->GetAttr("out_threshold")); multihead_op_desc.SetAttr("dp_probs", qkv_plugin_scale); } @@ -1337,7 +1339,8 @@ int TrtMultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, Node* reshape2, Node* reshape2_qkv_out, Node* matmul_qk) { - auto scale_attr = BOOST_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha")); + auto scale_attr = + PADDLE_GET_CONST(float, matmul_qk->Op()->GetAttr("alpha")); // mul (B * S * Hidden) x (Hidden * 3 * N * H) = (B * S * 3 * N * H) // bias (B * S * 3 * N * H) + bias (3 * N * H) @@ -1424,7 +1427,8 @@ int TrtMultiHeadMatmulV3FusePass::BuildFusionV3(Graph* graph, auto reshape_desc = reshape2->Op(); int head_number = - BOOST_GET_CONST(std::vector, reshape_desc->GetAttr("shape")).at(2); + PADDLE_GET_CONST(std::vector, reshape_desc->GetAttr("shape")) + .at(2); OpDesc multihead_op_desc(mul0->Op()->Block()); multihead_op_desc.SetType("multihead_matmul"); diff --git a/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc b/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc index d53431d260eaff..85d709c20db21f 100644 --- a/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc +++ b/paddle/fluid/framework/ir/unsqueeze2_eltwise_fuse_pass.cc @@ -153,9 +153,9 @@ void UnsqueezeEltwiseFusePass::ApplyImpl(ir::Graph *graph) const { size_t eltwise_in_x_rank = (subgraph.at(x)->Var()->GetShape()).size(); size_t unsqz_in_rank = (subgraph.at(y)->Var()->GetShape()).size(); std::vector unsqz_op_axes = - BOOST_GET_CONST(std::vector, unsqz_op->Op()->GetAttr("axes")); + PADDLE_GET_CONST(std::vector, unsqz_op->Op()->GetAttr("axes")); int eltwise_op_axis = - BOOST_GET_CONST(int, eltwise_op->Op()->GetAttr("axis")); + PADDLE_GET_CONST(int, eltwise_op->Op()->GetAttr("axis")); if (eltwise_in_x_rank == 4 && unsqz_in_rank == 2 && unsqz_op_axes == std::vector{2, 3} && eltwise_op_axis == -1) { diff --git a/paddle/fluid/framework/new_executor/standalone_executor_test.cc b/paddle/fluid/framework/new_executor/standalone_executor_test.cc index 3db2069eb23e6f..1816b0942f46f4 100644 --- a/paddle/fluid/framework/new_executor/standalone_executor_test.cc +++ b/paddle/fluid/framework/new_executor/standalone_executor_test.cc @@ -121,17 +121,17 @@ ProgramDesc GetLmMainProgram() { int64_t batch_size = 20; auto& op1 = global_block.AllOps()[1]; - auto shape1 = BOOST_GET_CONST(std::vector, 
op1->GetAttr("shape")); + auto shape1 = PADDLE_GET_CONST(std::vector, op1->GetAttr("shape")); shape1[0] = batch_size * 20; op1->SetAttr("shape", shape1); auto& op2 = global_block.AllOps()[2]; - auto shape2 = BOOST_GET_CONST(std::vector, op2->GetAttr("shape")); + auto shape2 = PADDLE_GET_CONST(std::vector, op2->GetAttr("shape")); shape2[0] = batch_size; op2->SetAttr("shape", shape2); auto& op3 = global_block.AllOps()[3]; - auto shape3 = BOOST_GET_CONST(std::vector, op3->GetAttr("shape")); + auto shape3 = PADDLE_GET_CONST(std::vector, op3->GetAttr("shape")); shape3[0] = batch_size; op3->SetAttr("shape", shape3); return main_prog; @@ -228,7 +228,7 @@ void TestShareWorkQueue(const ProgramDesc& prog, FetchList fetch_list = core->Run(feed_names, feed_tensors); for (size_t i = 0; i < fetch_list.size(); ++i) { const float* fetch_data = - BOOST_GET_CONST(LoDTensor, fetch_list[i]).data(); + PADDLE_GET_CONST(LoDTensor, fetch_list[i]).data(); ASSERT_FLOAT_EQ(*fetch_data, fetch_results.at(i)); } }; diff --git a/paddle/fluid/framework/no_need_buffer_vars_inference_test.cc b/paddle/fluid/framework/no_need_buffer_vars_inference_test.cc index a2c7df763a7ef7..d31a9680c16ea6 100644 --- a/paddle/fluid/framework/no_need_buffer_vars_inference_test.cc +++ b/paddle/fluid/framework/no_need_buffer_vars_inference_test.cc @@ -31,7 +31,7 @@ TEST(test_no_need_buffer_vars_inference, test_static_graph) { ASSERT_TRUE(ctx.HasOutput("Out")); ASSERT_FALSE(ctx.HasOutput("X")); - ASSERT_TRUE(BOOST_GET_CONST(bool, ctx.GetAttr("is_test"))); + ASSERT_TRUE(PADDLE_GET_CONST(bool, ctx.GetAttr("is_test"))); } TEST(test_no_need_buffer_vars_inference, test_dygraph) { @@ -46,7 +46,7 @@ TEST(test_no_need_buffer_vars_inference, test_dygraph) { ASSERT_TRUE(ctx.HasOutput("Out")); ASSERT_FALSE(ctx.HasOutput("X")); - ASSERT_TRUE(BOOST_GET_CONST(bool, ctx.GetAttr("is_test"))); + ASSERT_TRUE(PADDLE_GET_CONST(bool, ctx.GetAttr("is_test"))); } DECLARE_NO_NEED_BUFFER_VARS_INFERER(TestNoNeedBufferVarsInferer, "X1", "X2"); diff --git a/paddle/fluid/framework/op_call_stack.cc b/paddle/fluid/framework/op_call_stack.cc index d320ced05012cd..3c3f93975f7d06 100644 --- a/paddle/fluid/framework/op_call_stack.cc +++ b/paddle/fluid/framework/op_call_stack.cc @@ -43,7 +43,7 @@ void InsertCallStackInfo(const std::string &type, const std::vector *callstack = nullptr; auto iter = attrs.find(OpProtoAndCheckerMaker::OpCreationCallstackAttrName()); if (iter != attrs.end()) { - callstack = &BOOST_GET_CONST(std::vector, iter->second); + callstack = &PADDLE_GET_CONST(std::vector, iter->second); if (callstack->empty()) callstack = nullptr; } diff --git a/paddle/fluid/framework/op_desc.cc b/paddle/fluid/framework/op_desc.cc index c0a9528c28126f..52ac86d0606940 100644 --- a/paddle/fluid/framework/op_desc.cc +++ b/paddle/fluid/framework/op_desc.cc @@ -587,7 +587,7 @@ void OpDesc::SetAttr(const std::string &name, const Attribute &v) { // here if we meet this issue proto::AttrType attr_type = static_cast(v.index() - 1); if (attr_type == proto::AttrType::INTS && - BOOST_GET_CONST(std::vector, v).size() == 0u) { + PADDLE_GET_CONST(std::vector, v).size() == 0u) { // Find current attr via attr name and set the correct attribute value const proto::OpProto::Attr &attr = GetProtoAttr(name); switch (attr.type()) { @@ -638,7 +638,7 @@ void OpDesc::SetAttr(const std::string &name, const Attribute &v) { // In order to set bool attr properly if (attr_type == proto::AttrType::INT && HasProtoAttr(name) && GetProtoAttr(name).type() == proto::AttrType::BOOLEAN) { - this->attrs_[name] = 
static_cast(BOOST_GET_CONST(int, v)); + this->attrs_[name] = static_cast(PADDLE_GET_CONST(int, v)); need_update_ = true; return; } @@ -703,7 +703,7 @@ std::vector OpDesc::GetBlocksAttrIds(const std::string &name) const { attrs_.end(), platform::errors::NotFound( "Attribute `%s` is not found in operator `%s`.", name, desc_.type())); - auto blocks = BOOST_GET_CONST(std::vector, it->second); + auto blocks = PADDLE_GET_CONST(std::vector, it->second); std::vector ids; for (auto n : blocks) { @@ -720,7 +720,7 @@ int OpDesc::GetBlockAttrId(const std::string &name) const { attrs_.end(), platform::errors::NotFound( "Attribute `%s` is not found in operator `%s`.", name, desc_.type())); - return BOOST_GET_CONST(BlockDesc *, it->second)->ID(); + return PADDLE_GET_CONST(BlockDesc *, it->second)->ID(); } const std::unordered_map &OpDesc::GetAttrMap() const { @@ -742,7 +742,7 @@ void OpDesc::RenameOutput(const std::string &old_name, auto it = attrs_.find(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName()); if (it != attrs_.end()) { - auto &op_vars = BOOST_GET(std::vector, it->second); + auto &op_vars = PADDLE_GET(std::vector, it->second); std::replace(op_vars.begin(), op_vars.end(), old_name, new_name); } @@ -757,7 +757,7 @@ void OpDesc::RenameInput(const std::string &old_name, auto it = attrs_.find(framework::OpProtoAndCheckerMaker::OpRoleVarAttrName()); if (it != attrs_.end()) { - auto &op_vars = BOOST_GET(std::vector, it->second); + auto &op_vars = PADDLE_GET(std::vector, it->second); std::replace(op_vars.begin(), op_vars.end(), old_name, new_name); } diff --git a/paddle/fluid/framework/op_desc.h b/paddle/fluid/framework/op_desc.h index 35dd7e44e2e955..02186a02e3d835 100644 --- a/paddle/fluid/framework/op_desc.h +++ b/paddle/fluid/framework/op_desc.h @@ -95,7 +95,7 @@ class OpDesc { T GetAttrIfExists(const std::string &name) const { T result{}; if (HasAttr(name)) { - result = BOOST_GET_CONST(T, GetAttr(name)); + result = PADDLE_GET_CONST(T, GetAttr(name)); } return result; } diff --git a/paddle/fluid/framework/operator.cc b/paddle/fluid/framework/operator.cc index 4f50996267b97b..c8dc5f059c7c53 100644 --- a/paddle/fluid/framework/operator.cc +++ b/paddle/fluid/framework/operator.cc @@ -1347,7 +1347,7 @@ bool OperatorWithKernel::CanMKLDNNBeUsed(const framework::ExecutionContext& ctx, const auto& attrs_map = ctx.Attrs(); auto iter = attrs_map.find("use_mkldnn"); bool use_mkldnn_ctx = iter != attrs_map.end() && - BOOST_GET_CONST(bool, iter->second) && + PADDLE_GET_CONST(bool, iter->second) && platform::is_cpu_place(ctx.GetPlace()); return use_mkldnn_ctx && this->SupportsMKLDNN(data_type); } @@ -2643,15 +2643,15 @@ void OperatorWithKernel::BuildPhiKernelContext( switch (AttrTypeID(attr_iter->second)) { case proto::AttrType::FLOAT: pt_kernel_context->EmplaceBackAttr(std::move( - phi::Scalar(BOOST_GET_CONST(float, attr_iter->second)))); + phi::Scalar(PADDLE_GET_CONST(float, attr_iter->second)))); break; case proto::AttrType::INT: pt_kernel_context->EmplaceBackAttr(std::move( - phi::Scalar(BOOST_GET_CONST(int, attr_iter->second)))); + phi::Scalar(PADDLE_GET_CONST(int, attr_iter->second)))); break; case proto::AttrType::STRING: pt_kernel_context->EmplaceBackAttr(std::move(phi::Scalar( - BOOST_GET_CONST(std::string, attr_iter->second)))); + PADDLE_GET_CONST(std::string, attr_iter->second)))); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -2671,19 +2671,19 @@ void OperatorWithKernel::BuildPhiKernelContext( switch (AttrTypeID(attr_iter->second)) { case proto::AttrType::INTS: 
pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray( - BOOST_GET_CONST(std::vector, attr_iter->second)))); + PADDLE_GET_CONST(std::vector, attr_iter->second)))); break; case proto::AttrType::LONGS: pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray( - BOOST_GET_CONST(std::vector, attr_iter->second)))); + PADDLE_GET_CONST(std::vector, attr_iter->second)))); break; case proto::AttrType::INT: pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray( - &BOOST_GET_CONST(int32_t, attr_iter->second), 1))); + &PADDLE_GET_CONST(int32_t, attr_iter->second), 1))); break; case proto::AttrType::LONG: pt_kernel_context->EmplaceBackAttr(std::move(phi::IntArray( - &BOOST_GET_CONST(int64_t, attr_iter->second), 1))); + &PADDLE_GET_CONST(int64_t, attr_iter->second), 1))); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -2713,7 +2713,7 @@ void OperatorWithKernel::BuildPhiKernelContext( switch (AttrTypeID(attr_iter->second)) { case proto::AttrType::INTS: { const auto& vec = - BOOST_GET_CONST(std::vector, attr_iter->second); + PADDLE_GET_CONST(std::vector, attr_iter->second); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -2723,7 +2723,7 @@ void OperatorWithKernel::BuildPhiKernelContext( } break; case proto::AttrType::LONGS: { const auto& vec = - BOOST_GET_CONST(std::vector, attr_iter->second); + PADDLE_GET_CONST(std::vector, attr_iter->second); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -2733,7 +2733,7 @@ void OperatorWithKernel::BuildPhiKernelContext( } break; case proto::AttrType::FLOATS: { const auto& vec = - BOOST_GET_CONST(std::vector, attr_iter->second); + PADDLE_GET_CONST(std::vector, attr_iter->second); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -2743,7 +2743,7 @@ void OperatorWithKernel::BuildPhiKernelContext( } break; case proto::AttrType::FLOAT64S: { const auto& vec = - BOOST_GET_CONST(std::vector, attr_iter->second); + PADDLE_GET_CONST(std::vector, attr_iter->second); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -2753,7 +2753,7 @@ void OperatorWithKernel::BuildPhiKernelContext( } break; case proto::AttrType::BOOLEANS: { const auto& vec = - BOOST_GET_CONST(std::vector, attr_iter->second); + PADDLE_GET_CONST(std::vector, attr_iter->second); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -2778,43 +2778,43 @@ void OperatorWithKernel::BuildPhiKernelContext( switch (attr_defs[i].type_index) { case phi::AttributeType::FLOAT32: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(float, attr_iter->second)); + PADDLE_GET_CONST(float, attr_iter->second)); break; case phi::AttributeType::INT32: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(int, attr_iter->second)); + PADDLE_GET_CONST(int, attr_iter->second)); break; case phi::AttributeType::BOOL: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(bool, attr_iter->second)); + PADDLE_GET_CONST(bool, attr_iter->second)); break; case phi::AttributeType::INT64: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(int64_t, attr_iter->second)); + PADDLE_GET_CONST(int64_t, attr_iter->second)); break; case phi::AttributeType::INT32S: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr_iter->second)); + PADDLE_GET_CONST(std::vector, attr_iter->second)); break; case phi::AttributeType::DATA_TYPE: { auto data_type = framework::TransToPhiDataType( static_cast( - 
BOOST_GET_CONST(int, attr_iter->second))); + PADDLE_GET_CONST(int, attr_iter->second))); pt_kernel_context->EmplaceBackAttr(data_type); } break; case phi::AttributeType::STRING: pt_kernel_context->EmplaceBackAttr( - std::move(BOOST_GET_CONST(std::string, attr_iter->second))); + std::move(PADDLE_GET_CONST(std::string, attr_iter->second))); break; case phi::AttributeType::INT64S: switch (AttrTypeID(attr_iter->second)) { case proto::AttrType::LONGS: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr_iter->second)); + PADDLE_GET_CONST(std::vector, attr_iter->second)); break; case proto::AttrType::INTS: { const auto& vector_int_attr = - BOOST_GET_CONST(std::vector, attr_iter->second); + PADDLE_GET_CONST(std::vector, attr_iter->second); const std::vector vector_int64_attr( vector_int_attr.begin(), vector_int_attr.end()); pt_kernel_context->EmplaceBackAttr(vector_int64_attr); @@ -2829,11 +2829,11 @@ void OperatorWithKernel::BuildPhiKernelContext( break; case phi::AttributeType::FLOAT32S: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr_iter->second)); + PADDLE_GET_CONST(std::vector, attr_iter->second)); break; case phi::AttributeType::STRINGS: pt_kernel_context->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr_iter->second)); + PADDLE_GET_CONST(std::vector, attr_iter->second)); break; default: PADDLE_THROW(platform::errors::Unimplemented( diff --git a/paddle/fluid/framework/operator.h b/paddle/fluid/framework/operator.h index c3827f56c7197b..be3259c7b1d74d 100644 --- a/paddle/fluid/framework/operator.h +++ b/paddle/fluid/framework/operator.h @@ -183,7 +183,7 @@ class OperatorBase { attrs_.find(name), attrs_.end(), platform::errors::NotFound("(%s) is not found in AttributeMap.", name)); - return BOOST_GET_CONST(T, attrs_.at(name)); + return PADDLE_GET_CONST(T, attrs_.at(name)); } void SetAttr(const std::string& name, const Attribute& v) { PADDLE_ENFORCE_EQ( @@ -297,7 +297,7 @@ class ExecutionContext { template inline const T& Attr(const std::string& name) const { - return BOOST_GET_CONST(T, GetAttr(name)); + return PADDLE_GET_CONST(T, GetAttr(name)); } virtual const Attribute& GetAttr(const std::string& name) const { diff --git a/paddle/fluid/framework/operator_test.cc b/paddle/fluid/framework/operator_test.cc index ba7a5956ae0fd4..b5aaa22e86ee20 100644 --- a/paddle/fluid/framework/operator_test.cc +++ b/paddle/fluid/framework/operator_test.cc @@ -543,7 +543,7 @@ TEST(ExecutionContextAttrAndInOut, new_api) { ASSERT_EQ(exe_context.OutputSize("output"), 1u); auto attr_map = exe_context.Attrs(); - ASSERT_EQ(BOOST_GET(float, attr_map["scale"]), 3.14f); + ASSERT_EQ(PADDLE_GET(float, attr_map["scale"]), 3.14f); ASSERT_EQ(exe_context.Type(), "test_operator"); } diff --git a/paddle/fluid/framework/paddle2cinn/build_cinn_pass.cc b/paddle/fluid/framework/paddle2cinn/build_cinn_pass.cc index 7a5ecbce1bc9c7..b25d3a7f3af92f 100644 --- a/paddle/fluid/framework/paddle2cinn/build_cinn_pass.cc +++ b/paddle/fluid/framework/paddle2cinn/build_cinn_pass.cc @@ -131,7 +131,7 @@ int ExtractOpRole(const GraphNodeSet& cluster) { std::string attr_name = OpProtoAndCheckerMaker::OpRoleAttrName(); for (auto* n : cluster) { if (n->Op() && n->Op()->HasAttr(attr_name)) { - op_roles.insert(BOOST_GET_CONST(int, n->Op()->GetAttr(attr_name))); + op_roles.insert(PADDLE_GET_CONST(int, n->Op()->GetAttr(attr_name))); } } if (op_roles.size() == 1U) { diff --git a/paddle/fluid/framework/paddle2cinn/build_cinn_pass_test.cc b/paddle/fluid/framework/paddle2cinn/build_cinn_pass_test.cc index 
1613088f82512f..f951a09cfd56af 100644 --- a/paddle/fluid/framework/paddle2cinn/build_cinn_pass_test.cc +++ b/paddle/fluid/framework/paddle2cinn/build_cinn_pass_test.cc @@ -94,7 +94,7 @@ std::vector GetCompilationKeys(const Graph& graph) { std::vector compilation_keys; for (auto& node : graph.Nodes()) { if (node->IsOp() && node->Name() == kCinnLaunchOp) { - compilation_keys.emplace_back(BOOST_GET_CONST( + compilation_keys.emplace_back(PADDLE_GET_CONST( int64_t, node->Op()->GetAttr(operators::kCompilationKey))); } } diff --git a/paddle/fluid/framework/paddle2cinn/cinn_compiler_test.cc b/paddle/fluid/framework/paddle2cinn/cinn_compiler_test.cc index 69e0c949b5fe35..db2f899101de34 100644 --- a/paddle/fluid/framework/paddle2cinn/cinn_compiler_test.cc +++ b/paddle/fluid/framework/paddle2cinn/cinn_compiler_test.cc @@ -63,7 +63,7 @@ std::vector GetCompilationKeys(const Graph& graph) { std::vector compilation_keys; for (auto& node : graph.Nodes()) { if (node->IsOp() && node->Name() == kCinnLaunchOp) { - compilation_keys.emplace_back(BOOST_GET_CONST( + compilation_keys.emplace_back(PADDLE_GET_CONST( int64_t, node->Op()->GetAttr(operators::kCompilationKey))); } } @@ -87,8 +87,8 @@ std::unordered_map> GetInputsInfo( std::unordered_set inputs; for (auto& node : graph.Nodes()) { if (node->IsOp() && node->Name() == kCinnLaunchOp) { - if (BOOST_GET_CONST(int64_t, - node->Op()->GetAttr(operators::kCompilationKey)) != + if (PADDLE_GET_CONST(int64_t, + node->Op()->GetAttr(operators::kCompilationKey)) != key) { continue; } diff --git a/paddle/fluid/framework/parallel_executor.cc b/paddle/fluid/framework/parallel_executor.cc index 4e6aeaeb7ac6a5..4ad966887f399f 100644 --- a/paddle/fluid/framework/parallel_executor.cc +++ b/paddle/fluid/framework/parallel_executor.cc @@ -995,7 +995,7 @@ FetchUnmergedList ParallelExecutor::Run( VLOG(3) << "ParallelExecutor begin to run member_->executor_->Run"; auto fetch_data = member_->executor_->Run(fetch_tensors, /*return_merged=*/false); - return BOOST_GET(FetchUnmergedList, fetch_data); + return PADDLE_GET(FetchUnmergedList, fetch_data); } FetchList ParallelExecutor::RunAndMerge( @@ -1012,7 +1012,7 @@ FetchList ParallelExecutor::RunAndMerge( VLOG(3) << "ParallelExecutor begin to run member_->executor_->RunAndMerge"; auto fetch_data = member_->executor_->Run(fetch_tensors, /*return_merged=*/true); - return BOOST_GET(FetchList, fetch_data); + return PADDLE_GET(FetchList, fetch_data); } void ParallelExecutor::RunWithoutFetch( diff --git a/paddle/fluid/framework/program_desc.cc b/paddle/fluid/framework/program_desc.cc index 5e72a6c113f04f..b184bc8be36813 100644 --- a/paddle/fluid/framework/program_desc.cc +++ b/paddle/fluid/framework/program_desc.cc @@ -74,7 +74,7 @@ ProgramDesc::ProgramDesc(const ProgramDesc &o) { for (const std::string &attr_name : op->AttrNames()) { if (op->GetAttrType(attr_name) == proto::AttrType::BLOCK) { framework::BlockDesc *block_desc = - BOOST_GET_CONST(framework::BlockDesc *, op->GetAttr(attr_name)); + PADDLE_GET_CONST(framework::BlockDesc *, op->GetAttr(attr_name)); if (std::find(old_block_desc.begin(), old_block_desc.end(), block_desc) != old_block_desc.end()) { @@ -152,7 +152,7 @@ const std::vector ProgramDesc::GetFeedTargetNames() { std::vector feed_target_names; for (auto *op : global_block.AllOps()) { if (op->Type() == kFeedOpType) { - size_t col = BOOST_GET_CONST(int, op->GetAttr("col")); + size_t col = PADDLE_GET_CONST(int, op->GetAttr("col")); if (col >= feed_target_names.size()) { feed_target_names.resize(col + 1); } @@ -169,7 +169,7 @@ 
const std::vector ProgramDesc::GetFetchTargetNames() { std::vector fetch_target_names; for (auto *op : global_block.AllOps()) { if (op->Type() == kFetchOpType) { - size_t col = BOOST_GET_CONST(int, op->GetAttr("col")); + size_t col = PADDLE_GET_CONST(int, op->GetAttr("col")); if (col >= fetch_target_names.size()) { fetch_target_names.resize(col + 1); } diff --git a/paddle/fluid/framework/program_processing.cc b/paddle/fluid/framework/program_processing.cc index 6dcfd470354fa1..ba50874c420a25 100644 --- a/paddle/fluid/framework/program_processing.cc +++ b/paddle/fluid/framework/program_processing.cc @@ -79,7 +79,7 @@ void ProgramProcessor::AddDepToBlockOp(const BlockDesc &block) { if (op->HasAttr("sub_block")) { auto op_type = op->Type(); BlockDesc *sub_block = - BOOST_GET_MUTABLE(BlockDesc *, op->GetAttr("sub_block")); + PADDLE_GET_MUTABLE(BlockDesc *, op->GetAttr("sub_block")); // recursively processing AddDepToBlockOp(*sub_block); diff --git a/paddle/fluid/framework/prune.cc b/paddle/fluid/framework/prune.cc index b5d63614d0c218..ede6a99c43678c 100644 --- a/paddle/fluid/framework/prune.cc +++ b/paddle/fluid/framework/prune.cc @@ -458,7 +458,7 @@ std::tuple> PruneBackward( for (size_t i = 0; i < origin_clone.Size(); i++) { auto block_ops = origin_clone.Block(i).AllOps(); for (auto op : block_ops) { - int op_role = BOOST_GET_MUTABLE( + int op_role = PADDLE_GET_MUTABLE( int, op->GetAttr(OpProtoAndCheckerMaker::OpRoleAttrName())); if (op_role == (static_cast(OpRole::kBackward) | static_cast(OpRole::kLoss))) { diff --git a/paddle/fluid/framework/tuple_test.cc b/paddle/fluid/framework/tuple_test.cc index cf85b544fe7a8a..6b50672bb4d1ee 100644 --- a/paddle/fluid/framework/tuple_test.cc +++ b/paddle/fluid/framework/tuple_test.cc @@ -23,9 +23,9 @@ TEST(Tuple, Make) { paddle::framework::Tuple* tuple = paddle::framework::make_tuple(element_type); - EXPECT_EQ(BOOST_GET(int, tuple->get(0)), 12); - EXPECT_EQ(BOOST_GET(float, tuple->get(1)), 12.0f); - EXPECT_EQ(BOOST_GET(std::string, tuple->get(2)), "ElementVar"); + EXPECT_EQ(PADDLE_GET(int, tuple->get(0)), 12); + EXPECT_EQ(PADDLE_GET(float, tuple->get(1)), 12.0f); + EXPECT_EQ(PADDLE_GET(std::string, tuple->get(2)), "ElementVar"); delete tuple; } diff --git a/paddle/fluid/framework/var_desc.cc b/paddle/fluid/framework/var_desc.cc index 55e2ae0969373a..1350d28d1de36d 100644 --- a/paddle/fluid/framework/var_desc.cc +++ b/paddle/fluid/framework/var_desc.cc @@ -317,7 +317,7 @@ void VarDesc::SetAttr(const std::string &name, const Attribute &v) { // here if we meet this issue proto::AttrType attr_type = static_cast(v.index() - 1); if (attr_type == proto::AttrType::INTS && - BOOST_GET_CONST(std::vector, v).size() == 0u) { + PADDLE_GET_CONST(std::vector, v).size() == 0u) { // Find current attr via attr name and set the correct attribute value this->attrs_[name] = std::vector(); return; diff --git a/paddle/fluid/imperative/dygraph_grad_maker.h b/paddle/fluid/imperative/dygraph_grad_maker.h index 6af8d3e04420f6..9250d0cbd463c1 100644 --- a/paddle/fluid/imperative/dygraph_grad_maker.h +++ b/paddle/fluid/imperative/dygraph_grad_maker.h @@ -137,7 +137,7 @@ class GradOpBaseMakerBase { template inline const T& Attr(const std::string& name) const { - return BOOST_GET_CONST(T, GetAttr(name)); + return PADDLE_GET_CONST(T, GetAttr(name)); } const std::string& ForwardOpType() const { return type_; } diff --git a/paddle/fluid/imperative/layout_autotune.cc b/paddle/fluid/imperative/layout_autotune.cc index 10a4a2e69d5402..f37105e2581033 100644 --- 
a/paddle/fluid/imperative/layout_autotune.cc +++ b/paddle/fluid/imperative/layout_autotune.cc @@ -174,10 +174,10 @@ paddle::imperative::NameVarMap AutoTuneLayout( conv_in_type = framework::proto::VarType::FP16; } bool is_tune_fp32 = - (BOOST_GET_CONST(std::string, (*attrs)["data_format"]) == "NHWC") && + (PADDLE_GET_CONST(std::string, (*attrs)["data_format"]) == "NHWC") && (conv_in_type == framework::proto::VarType::FP32); bool is_tune_fp16 = - (BOOST_GET_CONST(std::string, (*attrs)["data_format"]) == "NCHW") && + (PADDLE_GET_CONST(std::string, (*attrs)["data_format"]) == "NCHW") && (conv_in_type == framework::proto::VarType::FP16); if (is_tune_fp32) { LayoutAutoTune::Instance().SetDesiredLayout(DataLayout::NCHW); @@ -188,7 +188,8 @@ paddle::imperative::NameVarMap AutoTuneLayout( return ins; } VLOG(3) << "Tune the layout from " - << BOOST_GET_CONST(std::string, (*attrs)["data_format"]) << " to " + << PADDLE_GET_CONST(std::string, (*attrs)["data_format"]) + << " to " << paddle::framework::DataLayoutToString( LayoutAutoTune::Instance().GetDesiredLayout()); } diff --git a/paddle/fluid/imperative/layout_transformer.h b/paddle/fluid/imperative/layout_transformer.h index fa7261b6d52b62..401b37a428e948 100644 --- a/paddle/fluid/imperative/layout_transformer.h +++ b/paddle/fluid/imperative/layout_transformer.h @@ -166,17 +166,17 @@ class HeavilyLayoutSensitiveOpTransformer : public LayoutTransformer { std::string desired_layout_str = paddle::framework::DataLayoutToString( LayoutAutoTune::Instance().GetDesiredLayout()); if (attrs->find("data_format") != attrs->end() && - BOOST_GET_CONST(std::string, (*attrs)["data_format"]) != + PADDLE_GET_CONST(std::string, (*attrs)["data_format"]) != desired_layout_str) { VLOG(4) << "Origin layout attr: " - << BOOST_GET_CONST(std::string, (*attrs)["data_format"]) + << PADDLE_GET_CONST(std::string, (*attrs)["data_format"]) << ", Desired layout attr: " << desired_layout_str; (*attrs)["data_format"] = desired_layout_str; } else if (attrs->find("data_layout") != attrs->end() && - BOOST_GET_CONST(std::string, (*attrs)["data_layout"]) != + PADDLE_GET_CONST(std::string, (*attrs)["data_layout"]) != desired_layout_str) { VLOG(4) << "Origin layout attr: " - << BOOST_GET_CONST(std::string, (*attrs)["data_layout"]) + << PADDLE_GET_CONST(std::string, (*attrs)["data_layout"]) << ", Desired layout attr: " << desired_layout_str; (*attrs)["data_layout"] = desired_layout_str; } @@ -273,7 +273,7 @@ class ElementwiseOpTransformer auto in_layout = paddle::imperative::GetDataLayout(in1_vars); // for conv's bias if (attrs->find("axis") != attrs->end() && - BOOST_GET_CONST(int, (*attrs)["axis"]) != -1) { + PADDLE_GET_CONST(int, (*attrs)["axis"]) != -1) { if (in_layout == DataLayout::NHWC) { (*attrs)["axis"] = 3; } else if (in_layout == DataLayout::NCHW) { @@ -315,7 +315,7 @@ class TransposeOpTransformer auto var_layout = paddle::imperative::GetDataLayout(in_var); auto desired_layout = LayoutAutoTune::Instance().GetDesiredLayout(); if (var_layout == desired_layout && desired_layout == DataLayout::NHWC) { - auto axis = BOOST_GET_CONST(std::vector, (*attrs)["axis"]); + auto axis = PADDLE_GET_CONST(std::vector, (*attrs)["axis"]); // NHWC->NCHW, permutaion will be set as follows. std::vector perm = {0, 3, 1, 2}; // fuse the transpose Ops by transforming axis. @@ -343,8 +343,8 @@ class FlattenOpTransformer // Flatten the C, H, W dimensions will not affect functionality. // So transformation is unnecessary. 
But in other cases, it needs to // fall back to the LightlyLayoutSensitiveOpTransformer. - auto start_axis = BOOST_GET_CONST(int, (*attrs)["start_axis"]); - auto stop_axis = BOOST_GET_CONST(int, (*attrs)["stop_axis"]); + auto start_axis = PADDLE_GET_CONST(int, (*attrs)["start_axis"]); + auto stop_axis = PADDLE_GET_CONST(int, (*attrs)["stop_axis"]); if (paddle::imperative::GetDataLayout(ins.at("X")[0]) == LayoutAutoTune::Instance().GetDesiredLayout() && start_axis == 1 && stop_axis == 3) { @@ -371,7 +371,7 @@ class ArgmaxOpTransformer VLOG(3) << "Optimze lightly layout sensitive op " << this->Type(); auto& in_var = ins.at("X")[0]; auto var_layout = paddle::imperative::GetDataLayout(in_var); - bool keep_dims = BOOST_GET_CONST(bool, (*attrs)["keepdims"]); + bool keep_dims = PADDLE_GET_CONST(bool, (*attrs)["keepdims"]); if (keep_dims) { if (var_layout != DataLayout::UNDEFINED) { std::vector perm_nhwc = {0, 3, 1, 2}; @@ -380,11 +380,11 @@ class ArgmaxOpTransformer auto perm = var_layout == DataLayout::NHWC ? perm_nhwc : perm_nchw; switch (AttrTypeID((*attrs)["axis"])) { case paddle::framework::proto::AttrType::INT: { - auto axis = BOOST_GET_CONST(int, (*attrs)["axis"]); + auto axis = PADDLE_GET_CONST(int, (*attrs)["axis"]); (*attrs)["axis"] = static_cast(perm[axis]); } case paddle::framework::proto::AttrType::LONG: { - auto axis = BOOST_GET_CONST(int64_t, (*attrs)["axis"]); + auto axis = PADDLE_GET_CONST(int64_t, (*attrs)["axis"]); (*attrs)["axis"] = static_cast(perm[axis]); } default: @@ -436,10 +436,10 @@ class ConcatOpTransformer std::vector perm_nhwc = {0, 3, 1, 2}; std::vector perm_nchw = {0, 2, 3, 1}; auto perm = var_layout == DataLayout::NHWC ? perm_nhwc : perm_nchw; - auto axis = BOOST_GET_CONST(int, (*attrs)["axis"]); + auto axis = PADDLE_GET_CONST(int, (*attrs)["axis"]); (*attrs)["axis"] = static_cast(perm[axis]); } - auto axis = BOOST_GET_CONST(int, (*attrs)["axis"]); + auto axis = PADDLE_GET_CONST(int, (*attrs)["axis"]); VLOG(3) << "Optimze lightly layout sensitive op asdfasdfasdf axis" << axis; this->SetVarsLayout(outs, var_layout); diff --git a/paddle/fluid/imperative/op_base.h b/paddle/fluid/imperative/op_base.h index b8e7bff8480f0f..04d6b4bd7606bf 100644 --- a/paddle/fluid/imperative/op_base.h +++ b/paddle/fluid/imperative/op_base.h @@ -147,7 +147,7 @@ class OpBase { template inline const T& Attr(const std::string& name) const { - return BOOST_GET_CONST(T, GetAttr(name)); + return PADDLE_GET_CONST(T, GetAttr(name)); } size_t id() const { return id_; } diff --git a/paddle/fluid/imperative/prepared_operator.h b/paddle/fluid/imperative/prepared_operator.h index c0ff0914401b55..c2b23f31d1daf2 100644 --- a/paddle/fluid/imperative/prepared_operator.h +++ b/paddle/fluid/imperative/prepared_operator.h @@ -410,15 +410,15 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, switch (AttrTypeID(attr)) { case framework::proto::AttrType::FLOAT: kernel_ctx->EmplaceBackAttr( - std::move(phi::Scalar(BOOST_GET_CONST(float, attr)))); + std::move(phi::Scalar(PADDLE_GET_CONST(float, attr)))); break; case framework::proto::AttrType::INT: kernel_ctx->EmplaceBackAttr( - std::move(phi::Scalar(BOOST_GET_CONST(int, attr)))); + std::move(phi::Scalar(PADDLE_GET_CONST(int, attr)))); break; case framework::proto::AttrType::STRING: kernel_ctx->EmplaceBackAttr( - std::move(phi::Scalar(BOOST_GET_CONST(std::string, attr)))); + std::move(phi::Scalar(PADDLE_GET_CONST(std::string, attr)))); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -438,19 +438,19 @@ void 
BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, switch (AttrTypeID(attr)) { case framework::proto::AttrType::INTS: kernel_ctx->EmplaceBackAttr(std::move( - phi::IntArray(BOOST_GET_CONST(std::vector, attr)))); + phi::IntArray(PADDLE_GET_CONST(std::vector, attr)))); break; case framework::proto::AttrType::LONGS: kernel_ctx->EmplaceBackAttr(std::move( - phi::IntArray(BOOST_GET_CONST(std::vector, attr)))); + phi::IntArray(PADDLE_GET_CONST(std::vector, attr)))); break; case framework::proto::AttrType::INT: - kernel_ctx->EmplaceBackAttr( - std::move(phi::IntArray(&BOOST_GET_CONST(int32_t, attr), 1))); + kernel_ctx->EmplaceBackAttr(std::move( + phi::IntArray(&PADDLE_GET_CONST(int32_t, attr), 1))); break; case framework::proto::AttrType::LONG: - kernel_ctx->EmplaceBackAttr( - std::move(phi::IntArray(&BOOST_GET_CONST(int64_t, attr), 1))); + kernel_ctx->EmplaceBackAttr(std::move( + phi::IntArray(&PADDLE_GET_CONST(int64_t, attr), 1))); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -483,7 +483,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, auto& attr = *attr_ptr; switch (AttrTypeID(attr)) { case framework::proto::AttrType::INTS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -492,7 +492,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, kernel_ctx->EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::LONGS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -501,7 +501,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, kernel_ctx->EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::FLOATS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -510,7 +510,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, kernel_ctx->EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::FLOAT64S: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -519,7 +519,7 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, kernel_ctx->EmplaceBackAttr(std::move(scalar_list)); } break; case framework::proto::AttrType::BOOLEANS: { - const auto& vec = BOOST_GET_CONST(std::vector, attr); + const auto& vec = PADDLE_GET_CONST(std::vector, attr); std::vector scalar_list; scalar_list.reserve(vec.size()); for (const auto& val : vec) { @@ -543,40 +543,40 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, auto& attr = *attr_ptr; switch (attr_defs[i].type_index) { case phi::AttributeType::FLOAT32: - kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(float, attr)); + kernel_ctx->EmplaceBackAttr(PADDLE_GET_CONST(float, attr)); break; case phi::AttributeType::INT32: - kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int, attr)); + kernel_ctx->EmplaceBackAttr(PADDLE_GET_CONST(int, attr)); break; case 
phi::AttributeType::BOOL: - kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(bool, attr)); + kernel_ctx->EmplaceBackAttr(PADDLE_GET_CONST(bool, attr)); break; case phi::AttributeType::INT64: - kernel_ctx->EmplaceBackAttr(BOOST_GET_CONST(int64_t, attr)); + kernel_ctx->EmplaceBackAttr(PADDLE_GET_CONST(int64_t, attr)); break; case phi::AttributeType::INT32S: kernel_ctx->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case phi::AttributeType::DATA_TYPE: { auto data_type = framework::TransToPhiDataType( static_cast( - BOOST_GET_CONST(int, attr))); + PADDLE_GET_CONST(int, attr))); kernel_ctx->EmplaceBackAttr(data_type); } break; case phi::AttributeType::STRING: kernel_ctx->EmplaceBackAttr( - std::move(BOOST_GET_CONST(std::string, attr))); + std::move(PADDLE_GET_CONST(std::string, attr))); break; case phi::AttributeType::INT64S: { switch (AttrTypeID(attr)) { case framework::proto::AttrType::LONGS: kernel_ctx->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case framework::proto::AttrType::INTS: { const auto& vector_int_attr = - BOOST_GET_CONST(std::vector, attr); + PADDLE_GET_CONST(std::vector, attr); const std::vector vector_int64_attr( vector_int_attr.begin(), vector_int_attr.end()); kernel_ctx->EmplaceBackAttr(vector_int64_attr); @@ -591,11 +591,11 @@ void BuildDygraphPhiKernelContext(const phi::KernelSignature& kernel_signature, } break; case phi::AttributeType::FLOAT32S: kernel_ctx->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; case phi::AttributeType::STRINGS: kernel_ctx->EmplaceBackAttr( - BOOST_GET_CONST(std::vector, attr)); + PADDLE_GET_CONST(std::vector, attr)); break; default: PADDLE_THROW(platform::errors::Unimplemented( diff --git a/paddle/fluid/imperative/tests/test_layer.cc b/paddle/fluid/imperative/tests/test_layer.cc index 1aa9939536a7e9..8f7d064db4b3e3 100644 --- a/paddle/fluid/imperative/tests/test_layer.cc +++ b/paddle/fluid/imperative/tests/test_layer.cc @@ -373,7 +373,7 @@ TEST(test_layer, test_dygraph_execution_context) { ASSERT_EQ(dy_exe_context.InputName("X"), "vin"); ASSERT_EQ(dy_exe_context.HasAttr("axis"), true); auto attr_map = dy_exe_context.Attrs(); - ASSERT_EQ(BOOST_GET(int, attr_map["axis"]), 1); + ASSERT_EQ(PADDLE_GET(int, attr_map["axis"]), 1); ASSERT_EQ(dy_exe_context.OutputSize("Out"), 1u); ASSERT_EQ(dy_exe_context.HasOutput("Out"), true); } diff --git a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc index f074bae46f9384..3410b229396a29 100644 --- a/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc +++ b/paddle/fluid/inference/analysis/ir_passes/subgraph_util.cc @@ -194,9 +194,9 @@ void RenameAndGetOutputs( auto out_var_name = op_desc.Output("Output").front(); auto filter_shape = in_vars[filter_var_name]->Var()->GetShape(); const std::vector strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); const std::vector paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); if (same_hierarchy_conv2d_num_map[input_var_name] > 0) { (*output_names_with_id) .insert(out_var_name + std::to_string(var2id[out_var_name])); diff --git a/paddle/fluid/inference/api/analysis_predictor.cc b/paddle/fluid/inference/api/analysis_predictor.cc index 5e787394bce256..12c2a9475f31c0 100644 --- 
a/paddle/fluid/inference/api/analysis_predictor.cc +++ b/paddle/fluid/inference/api/analysis_predictor.cc @@ -969,7 +969,7 @@ bool AnalysisPredictor::SetFeed(const std::vector &inputs, } idx = feed_names_[name]; } else { - idx = BOOST_GET_CONST(int, feeds_[i]->GetAttr("col")); + idx = PADDLE_GET_CONST(int, feeds_[i]->GetAttr("col")); } framework::SetFeedVariable(scope, *input, "feed", idx); } @@ -1001,7 +1001,7 @@ bool AnalysisPredictor::GetFetch(std::vector *outputs, VLOG(3) << "Predictor::get_fetch"; outputs->resize(fetches_.size()); for (size_t i = 0; i < fetches_.size(); ++i) { - int idx = BOOST_GET_CONST(int, fetches_[i]->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, fetches_[i]->GetAttr("col")); PADDLE_ENFORCE_EQ( static_cast(idx), i, @@ -1011,7 +1011,7 @@ bool AnalysisPredictor::GetFetch(std::vector *outputs, i)); framework::FetchType &fetch_var = framework::GetFetchVariable(*scope, "fetch", idx); - auto &fetch = BOOST_GET(framework::LoDTensor, fetch_var); + auto &fetch = PADDLE_GET(framework::LoDTensor, fetch_var); auto type = framework::TransToProtoVarType(fetch.dtype()); auto output = &(outputs->at(i)); output->name = fetches_[idx]->Input("X")[0]; @@ -1242,9 +1242,9 @@ void AnalysisPredictor::OptimizeInferenceProgram() { for (auto &op_desc : block.AllOps()) { if (op_desc->Type() == "tensorrt_engine") { std::string engine_key = - BOOST_GET_CONST(std::string, op_desc->GetAttr("engine_key")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("engine_key")); int engine_predictor_id = - BOOST_GET_CONST(int, op_desc->GetAttr("predictor_id")); + PADDLE_GET_CONST(int, op_desc->GetAttr("predictor_id")); std::string engine_name = engine_key + std::to_string(engine_predictor_id); if (paddle::inference::Singleton< @@ -1396,7 +1396,7 @@ void AnalysisPredictor::PrepareFeedFetch() { CreateFeedFetchVar(sub_scope_); for (auto *op : inference_program_->Block(0).AllOps()) { if (op->Type() == "feed") { - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); if (feeds_.size() <= static_cast(idx)) { feeds_.resize(idx + 1); } @@ -1404,7 +1404,7 @@ void AnalysisPredictor::PrepareFeedFetch() { feed_names_[op->Output("Out")[0]] = idx; idx2feeds_[idx] = op->Output("Out")[0]; } else if (op->Type() == "fetch") { - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); if (fetches_.size() <= static_cast(idx)) { fetches_.resize(idx + 1); } @@ -1846,7 +1846,7 @@ bool AnalysisPredictor::SaveTrtCalibToDisk() { auto &block = inference_program_->Block(0); for (auto &op_desc : block.AllOps()) { if (op_desc->Type() == "tensorrt_engine") { - std::string engine_name = BOOST_GET_CONST( + std::string engine_name = PADDLE_GET_CONST( std::string, op_desc->GetAttr("calibration_engine_key")); if (!Singleton::Global().Has(engine_name)) { LOG(ERROR) << "You should run the predictor(with trt) on the real data " diff --git a/paddle/fluid/inference/api/api_impl.cc b/paddle/fluid/inference/api/api_impl.cc index ccef32bb84cd68..34dade3628a468 100644 --- a/paddle/fluid/inference/api/api_impl.cc +++ b/paddle/fluid/inference/api/api_impl.cc @@ -43,14 +43,14 @@ std::string num2str(T a) { void NativePaddlePredictor::PrepareFeedFetch() { for (auto *op : inference_program_->Block(0).AllOps()) { if (op->Type() == "feed") { - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); if (feeds_.size() <= static_cast(idx)) { feeds_.resize(idx + 1); } feeds_[idx] = op; 
feed_names_[op->Output("Out")[0]] = idx; } else if (op->Type() == "fetch") { - int idx = BOOST_GET_CONST(int, op->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, op->GetAttr("col")); if (fetchs_.size() <= static_cast(idx)) { fetchs_.resize(idx + 1); } @@ -302,7 +302,7 @@ bool NativePaddlePredictor::SetFeed(const std::vector &inputs, if (config_.specify_input_name) { idx = feed_names_[inputs[i].name]; } else { - idx = BOOST_GET_CONST(int, feeds_[i]->GetAttr("col")); + idx = PADDLE_GET_CONST(int, feeds_[i]->GetAttr("col")); } framework::SetFeedVariable(scope, input, "feed", idx); } @@ -333,7 +333,7 @@ bool NativePaddlePredictor::GetFetch(std::vector *outputs, VLOG(3) << "Predictor::get_fetch"; outputs->resize(fetchs_.size()); for (size_t i = 0; i < fetchs_.size(); ++i) { - int idx = BOOST_GET_CONST(int, fetchs_[i]->GetAttr("col")); + int idx = PADDLE_GET_CONST(int, fetchs_[i]->GetAttr("col")); PADDLE_ENFORCE_EQ( static_cast(idx), i, @@ -343,7 +343,7 @@ bool NativePaddlePredictor::GetFetch(std::vector *outputs, i)); framework::FetchType &fetch_var = framework::GetFetchVariable(*scope, "fetch", idx); - auto fetch = BOOST_GET_CONST(framework::LoDTensor, fetch_var); + auto fetch = PADDLE_GET_CONST(framework::LoDTensor, fetch_var); auto type = framework::TransToProtoVarType(fetch.dtype()); auto output = &(outputs->at(i)); output->name = fetchs_[idx]->Input("X")[0]; diff --git a/paddle/fluid/inference/api/api_impl_tester.cc b/paddle/fluid/inference/api/api_impl_tester.cc index 690f30583103cf..7a9621b385b2e8 100644 --- a/paddle/fluid/inference/api/api_impl_tester.cc +++ b/paddle/fluid/inference/api/api_impl_tester.cc @@ -111,7 +111,7 @@ void MainWord2Vec(const paddle::PaddlePlace& place) { TestInference(config.model_dir, cpu_feeds, cpu_fetchs1); - auto output1_tensor = BOOST_GET(paddle::framework::LoDTensor, output1); + auto output1_tensor = PADDLE_GET(paddle::framework::LoDTensor, output1); float* lod_data = output1_tensor.data(); for (int i = 0; i < output1_tensor.numel(); ++i) { EXPECT_LT(lod_data[i] - data[i], ACC_DIFF); @@ -160,7 +160,7 @@ void MainImageClassification(const paddle::PaddlePlace& place) { size_t len = outputs[0].data.length(); float* data = static_cast(outputs[0].data.data()); float* lod_data = - BOOST_GET(paddle::framework::LoDTensor, output1).data(); + PADDLE_GET(paddle::framework::LoDTensor, output1).data(); for (size_t j = 0; j < len / sizeof(float); ++j) { EXPECT_NEAR(lod_data[j], data[j], ACC_DIFF); } @@ -216,7 +216,7 @@ void MainThreadsWord2Vec(const paddle::PaddlePlace& place) { } // check outputs correctness - auto ref_tensor = BOOST_GET(paddle::framework::LoDTensor, refs[tid]); + auto ref_tensor = PADDLE_GET(paddle::framework::LoDTensor, refs[tid]); float* ref_data = ref_tensor.data(); EXPECT_EQ(ref_tensor.numel(), static_cast(len / sizeof(float))); for (int i = 0; i < ref_tensor.numel(); ++i) { @@ -271,7 +271,7 @@ void MainThreadsImageClassification(const paddle::PaddlePlace& place) { ASSERT_EQ(local_outputs.size(), 1UL); const size_t len = local_outputs[0].data.length(); float* data = static_cast(local_outputs[0].data.data()); - auto ref_tensor = BOOST_GET(paddle::framework::LoDTensor, refs[tid]); + auto ref_tensor = PADDLE_GET(paddle::framework::LoDTensor, refs[tid]); float* ref_data = ref_tensor.data(); EXPECT_EQ((size_t)ref_tensor.numel(), len / sizeof(float)); for (int i = 0; i < ref_tensor.numel(); ++i) { diff --git a/paddle/fluid/inference/tensorrt/convert/activation_op.cc b/paddle/fluid/inference/tensorrt/convert/activation_op.cc index 
bc6bb40e8a42d9..6d34ae94b2022e 100644 --- a/paddle/fluid/inference/tensorrt/convert/activation_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/activation_op.cc @@ -56,11 +56,11 @@ class ActivationOpConverter : public OpConverter { nvinfer1::IActivationLayer* layer = nullptr; if (op_type_ == "softplus") { const float beta = op_desc.HasAttr("beta") - ? BOOST_GET_CONST(float, op_desc.GetAttr("beta")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("beta")) : 1.0f; const float threshold = op_desc.HasAttr("threshold") - ? BOOST_GET_CONST(float, op_desc.GetAttr("threshold")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("threshold")) : 20.0f; auto* layer_clip = TRT_ENGINE_ADD_LAYER( engine_, Activation, *input_tensor, nvinfer1::ActivationType::kCLIP); @@ -82,29 +82,32 @@ class ActivationOpConverter : public OpConverter { layer->setBeta(6.); } if (op_type_ == "elu") { - const float alpha = op_desc.HasAttr("alpha") - ? BOOST_GET_CONST(float, op_desc.GetAttr("alpha")) - : 1.0f; + const float alpha = + op_desc.HasAttr("alpha") + ? PADDLE_GET_CONST(float, op_desc.GetAttr("alpha")) + : 1.0f; layer->setAlpha(alpha); } if (op_type_ == "selu") { - const float alpha = op_desc.HasAttr("alpha") - ? BOOST_GET_CONST(float, op_desc.GetAttr("alpha")) - : 1.0507009873554804934193349852946; - const float scale = op_desc.HasAttr("scale") - ? BOOST_GET_CONST(float, op_desc.GetAttr("scale")) - : 1.6732632423543772848170429916717; + const float alpha = + op_desc.HasAttr("alpha") + ? PADDLE_GET_CONST(float, op_desc.GetAttr("alpha")) + : 1.0507009873554804934193349852946; + const float scale = + op_desc.HasAttr("scale") + ? PADDLE_GET_CONST(float, op_desc.GetAttr("scale")) + : 1.6732632423543772848170429916717; layer->setAlpha(alpha); layer->setBeta(scale); } if (op_type_ == "stanh") { const float scale_a = op_desc.HasAttr("scale_a") - ? BOOST_GET_CONST(float, op_desc.GetAttr("scale_a")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("scale_a")) : 0.67f; const float scale_b = op_desc.HasAttr("scale_b") - ? BOOST_GET_CONST(float, op_desc.GetAttr("scale_b")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("scale_b")) : 1.7159f; layer->setAlpha(scale_b); layer->setBeta(scale_a); @@ -112,7 +115,7 @@ class ActivationOpConverter : public OpConverter { if (op_type_ == "thresholded_relu") { const float threshold = op_desc.HasAttr("threshold") - ? BOOST_GET_CONST(float, op_desc.GetAttr("threshold")) + ? 
PADDLE_GET_CONST(float, op_desc.GetAttr("threshold")) : 1.0f; layer->setAlpha(threshold); } diff --git a/paddle/fluid/inference/tensorrt/convert/anchor_generator_op.cc b/paddle/fluid/inference/tensorrt/convert/anchor_generator_op.cc index d1063dd86eddf6..b6c12b97ec3e5f 100644 --- a/paddle/fluid/inference/tensorrt/convert/anchor_generator_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/anchor_generator_op.cc @@ -36,14 +36,14 @@ class AnchorGeneratorOpConverter : public OpConverter { std::vector output_names{anchor_name, variance_name}; const auto anchor_sizes = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("anchor_sizes")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("anchor_sizes")); const auto aspect_ratios = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("aspect_ratios")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("aspect_ratios")); const auto stride = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("stride")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("stride")); const auto variances = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("variances")); - const auto offset = BOOST_GET_CONST(float, op_desc.GetAttr("offset")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("variances")); + const auto offset = PADDLE_GET_CONST(float, op_desc.GetAttr("offset")); const int num_anchors = aspect_ratios.size() * anchor_sizes.size(); bool is_dynamic = engine_->with_dynamic_shape(); const auto height = input_dims.d[1]; diff --git a/paddle/fluid/inference/tensorrt/convert/arg_max_op.cc b/paddle/fluid/inference/tensorrt/convert/arg_max_op.cc index 0de95e88ca4ead..9782e224e580a6 100644 --- a/paddle/fluid/inference/tensorrt/convert/arg_max_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/arg_max_op.cc @@ -39,7 +39,7 @@ class ArgMaxOpConverter : public OpConverter { auto input_dims = input->getDimensions(); int rank = input_dims.nbDims; int axis = op_desc.HasAttr("axis") - ? BOOST_GET_CONST(int64_t, op_desc.GetAttr("axis")) + ? 
PADDLE_GET_CONST(int64_t, op_desc.GetAttr("axis")) : -1; if (axis > 0 && !engine_->with_dynamic_shape()) { axis -= 1; @@ -49,7 +49,7 @@ class ArgMaxOpConverter : public OpConverter { engine_, TopK, *input, nvinfer1::TopKOperation::kMAX, 1, 1 << axis); auto output_name = op_desc.Output("Out")[0]; - bool keepdims = BOOST_GET_CONST(bool, op_desc.GetAttr("keepdims")); + bool keepdims = PADDLE_GET_CONST(bool, op_desc.GetAttr("keepdims")); if (keepdims) { RreplenishLayerAndOutput(topk_layer, "arg_max", diff --git a/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc b/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc index c5dae16bc3cacf..159f00f7a871ad 100644 --- a/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/batch_norm_op.cc @@ -45,7 +45,7 @@ class BatchNormOpConverter : public OpConverter { auto* Mean_v = scope.FindVar(op_desc.Input("Mean").front()); auto* Scale_v = scope.FindVar(op_desc.Input("Scale").front()); auto* Variance_v = scope.FindVar(op_desc.Input("Variance").front()); - const float eps = BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")); + const float eps = PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")); auto output_name = op_desc.Output("Y").front(); PADDLE_ENFORCE_NOT_NULL( Bias_v, diff --git a/paddle/fluid/inference/tensorrt/convert/bilinear_interp_v2_op.cc b/paddle/fluid/inference/tensorrt/convert/bilinear_interp_v2_op.cc index 7bbda5fb48305a..37b525c6431753 100644 --- a/paddle/fluid/inference/tensorrt/convert/bilinear_interp_v2_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/bilinear_interp_v2_op.cc @@ -43,17 +43,17 @@ class BilinearInterpolateV2OpConverter : public OpConverter { auto input = engine_->GetITensor(input_name); auto data_layout = framework::StringToDataLayout( - BOOST_GET_CONST(std::string, op_desc.GetAttr("data_layout"))); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("data_layout"))); auto interp_method = - BOOST_GET_CONST(std::string, op_desc.GetAttr("interp_method")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("interp_method")); bool align_corners = - BOOST_GET_CONST(bool, op_desc.GetAttr("align_corners")); - auto align_mode = BOOST_GET_CONST(int, op_desc.GetAttr("align_mode")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("align_corners")); + auto align_mode = PADDLE_GET_CONST(int, op_desc.GetAttr("align_mode")); auto resize_inputs = op_desc.Inputs(); auto input_names = op_desc.Input("X"); - auto out_h = BOOST_GET_CONST(int, op_desc.GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, op_desc.GetAttr("out_w")); + auto out_h = PADDLE_GET_CONST(int, op_desc.GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, op_desc.GetAttr("out_w")); auto layer = TRT_ENGINE_ADD_LAYER(engine_, Resize, *input); if (align_mode == 0 && !align_corners) { @@ -77,7 +77,7 @@ class BilinearInterpolateV2OpConverter : public OpConverter { scale_w = scale_d[1]; } else { const std::vector scale_attr = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("scale")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("scale")); if (scale_attr.size() > 1) { scale_h = scale_attr[0]; scale_w = scale_attr[1]; diff --git a/paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc b/paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc index c60cfe4f2b552e..73eec4395f9679 100644 --- a/paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/c_allreduce_op.cc @@ -59,9 +59,9 @@ class CAllReduceOpConverter : public OpConverter { "But received Out's size %u.", 
output_num)); // Get attrs - int ring_id = BOOST_GET_CONST(int, op_desc.GetAttr("ring_id")); + int ring_id = PADDLE_GET_CONST(int, op_desc.GetAttr("ring_id")); bool use_calc_stream = - BOOST_GET_CONST(bool, op_desc.GetAttr("use_calc_stream")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("use_calc_stream")); nvinfer1::ILayer* layer = nullptr; #if IS_TRT_VERSION_GE(6000) diff --git a/paddle/fluid/inference/tensorrt/convert/cast_op.cc b/paddle/fluid/inference/tensorrt/convert/cast_op.cc index 18ea71fbf3be50..ab62c43d851eb8 100644 --- a/paddle/fluid/inference/tensorrt/convert/cast_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/cast_op.cc @@ -37,7 +37,7 @@ class CastOpConverter : public OpConverter { framework::OpDesc op_desc(op, nullptr); auto* input = engine_->GetITensor(op_desc.Input("X")[0]); - auto out_dtype = BOOST_GET_CONST(int, op_desc.GetAttr("out_dtype")); + auto out_dtype = PADDLE_GET_CONST(int, op_desc.GetAttr("out_dtype")); auto* layer = TRT_ENGINE_ADD_LAYER(engine_, Identity, *input); diff --git a/paddle/fluid/inference/tensorrt/convert/clip_op.cc b/paddle/fluid/inference/tensorrt/convert/clip_op.cc index 6961a117e8c25b..386a4262d5b15c 100644 --- a/paddle/fluid/inference/tensorrt/convert/clip_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/clip_op.cc @@ -41,8 +41,8 @@ class ClipOpConverter : public OpConverter { framework::OpDesc op_desc(op, nullptr); // Declare inputs auto* input = engine_->GetITensor(op_desc.Input("X")[0]); - float min = BOOST_GET_CONST(float, op_desc.GetAttr("min")); - float max = BOOST_GET_CONST(float, op_desc.GetAttr("max")); + float min = PADDLE_GET_CONST(float, op_desc.GetAttr("min")); + float max = PADDLE_GET_CONST(float, op_desc.GetAttr("max")); auto* layer = TRT_ENGINE_ADD_LAYER( engine_, Activation, *input, nvinfer1::ActivationType::kCLIP); layer->setAlpha(min); diff --git a/paddle/fluid/inference/tensorrt/convert/concat_op.cc b/paddle/fluid/inference/tensorrt/convert/concat_op.cc index ff75a2d3719331..a248178338cb95 100644 --- a/paddle/fluid/inference/tensorrt/convert/concat_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/concat_op.cc @@ -44,7 +44,7 @@ class ConcatOpConverter : public OpConverter { for (auto& input_name : op_desc.Input("X")) { itensors.push_back(engine_->GetITensor(input_name)); } - int axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis")); if (axis == -1) { axis = (engine_->GetITensor(op_desc.Input("X").front())->getDimensions()) .nbDims - diff --git a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc index c47f6d03cd5432..c483202bfa3e4b 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv2d_op.cc @@ -55,7 +55,7 @@ void ConvertConv2d(TensorRTEngine* engine, if (enable_int8) { #if IS_TRT_VERSION_GE(5000) - float in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + float in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); engine->SetTensorDynamicRange(X, in_scale); #endif } @@ -70,17 +70,17 @@ void ConvertConv2d(TensorRTEngine* engine, const int n_input = Y_t->dims()[1]; const int filter_h = Y_t->dims()[2]; const int filter_w = Y_t->dims()[3]; - const int groups = BOOST_GET_CONST(int, op_desc.GetAttr("groups")); + const int groups = PADDLE_GET_CONST(int, op_desc.GetAttr("groups")); const std::vector dilations = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("dilations")); + PADDLE_GET_CONST(std::vector, 
op_desc.GetAttr("dilations")); const std::vector strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); std::vector paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); std::string padding_algorithm = "EXPLICIT"; if (op_desc.HasAttr("padding_algorithm")) padding_algorithm = - BOOST_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); if (padding_algorithm == "VALID") { for (size_t i = 0; i < paddings.size(); i++) { paddings[i] = 0; diff --git a/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc b/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc index 4ffc8056547272..de6b24eabce38d 100644 --- a/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/conv3d_op.cc @@ -51,7 +51,7 @@ void ConvertConv3d(TensorRTEngine* engine, bool enable_int8 = op_desc.HasAttr("enable_int8"); if (enable_int8) { - float in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + float in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); engine->SetTensorDynamicRange(X, in_scale); } @@ -66,17 +66,17 @@ void ConvertConv3d(TensorRTEngine* engine, const int filter_d = Y_t->dims()[2]; const int filter_h = Y_t->dims()[3]; const int filter_w = Y_t->dims()[4]; - const int groups = BOOST_GET_CONST(int, op_desc.GetAttr("groups")); + const int groups = PADDLE_GET_CONST(int, op_desc.GetAttr("groups")); const std::vector dilations = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("dilations")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("dilations")); const std::vector strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); const std::vector paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); std::string padding_algorithm = "EXPLICIT"; if (op_desc.HasAttr("padding_algorithm")) padding_algorithm = - BOOST_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); nvinfer1::Dims3 nv_ksize(filter_d, filter_h, filter_w); nvinfer1::Dims3 nv_dilations(dilations[0], dilations[1], dilations[2]); diff --git a/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc b/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc index 8cf7f6528e5950..35594a76b0abbc 100644 --- a/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/deformable_conv_op.cc @@ -56,16 +56,16 @@ class DeformableConvOpConverter : public OpConverter { std::vector kernel_dims = {c_o, c_i, k_h, k_w}; auto strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); auto paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); auto dilations = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("dilations")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("dilations")); - auto groups = BOOST_GET_CONST(int, op_desc.GetAttr("groups")); + auto groups = PADDLE_GET_CONST(int, op_desc.GetAttr("groups")); auto deformable_groups = - BOOST_GET_CONST(int, op_desc.GetAttr("deformable_groups")); - auto im2col_step = BOOST_GET_CONST(int, 
op_desc.GetAttr("im2col_step")); + PADDLE_GET_CONST(int, op_desc.GetAttr("deformable_groups")); + auto im2col_step = PADDLE_GET_CONST(int, op_desc.GetAttr("im2col_step")); nvinfer1::Weights weights; weights.count = filter_tensor->numel(); diff --git a/paddle/fluid/inference/tensorrt/convert/dropout_op.cc b/paddle/fluid/inference/tensorrt/convert/dropout_op.cc index 00d9d047a697da..e9cc82f7d29bd6 100644 --- a/paddle/fluid/inference/tensorrt/convert/dropout_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/dropout_op.cc @@ -41,11 +41,11 @@ class DropoutOpConverter : public OpConverter { // Declare inputs auto* input1 = engine_->GetITensor(op_desc.Input("X")[0]); float dropout_prob = - BOOST_GET_CONST(float, op_desc.GetAttr("dropout_prob")); + PADDLE_GET_CONST(float, op_desc.GetAttr("dropout_prob")); std::string downgrade_in_infer = ""; if (op_desc.HasAttr("dropout_implementation")) { - downgrade_in_infer = BOOST_GET_CONST( + downgrade_in_infer = PADDLE_GET_CONST( std::string, op_desc.GetAttr("dropout_implementation")); } diff --git a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc index 7fd89dd731a8e5..e873ad4f624fc0 100644 --- a/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/elementwise_op.cc @@ -75,7 +75,7 @@ class ElementwiseTensorOpConverter : public OpConverter { auto output_name = op_desc.Output("Out")[0]; // axis here is relative to explicit batch - int axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis")); int real_x_rank = dims_x.nbDims; int real_y_rank = dims_y.nbDims; if (!engine_->with_dynamic_shape()) { diff --git a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc index 5020b976277530..cb6797c4e2a71d 100644 --- a/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc +++ b/paddle/fluid/inference/tensorrt/convert/emb_eltwise_layernorm.cc @@ -220,7 +220,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter { free(plugin_ptr); if (enable_int8) { float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); engine_->SetTensorDynamicRange(plugin_layer->getOutput(0), out_scale); engine_->SetTensorDynamicRange(plugin_layer->getOutput(1), out_scale); } @@ -252,7 +252,7 @@ class EmbEltwiseLayerNormOpConverter : public OpConverter { } else { bool with_fp16 = engine_->WithFp16() && !engine_->disable_trt_plugin_fp16(); - float eps = BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")); + float eps = PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")); plugin::DynamicPluginTensorRT* plugin = nullptr; std::vector input_embs_data; for (size_t i = 0; i < input_embs.size(); ++i) { diff --git a/paddle/fluid/inference/tensorrt/convert/equal_op.cc b/paddle/fluid/inference/tensorrt/convert/equal_op.cc index 2e29c0f7007526..3a9627dc99a5c3 100644 --- a/paddle/fluid/inference/tensorrt/convert/equal_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/equal_op.cc @@ -44,7 +44,7 @@ class EqualOpConverter : public OpConverter { nvinfer1::Dims dims_x = X->getDimensions(); nvinfer1::Dims dims_y = Y->getDimensions(); - int axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis")); if (axis < 0) { axis = std::abs(dims_x.nbDims - dims_y.nbDims); } diff --git 
a/paddle/fluid/inference/tensorrt/convert/fc_op.cc b/paddle/fluid/inference/tensorrt/convert/fc_op.cc index 1bd9cf8712d989..298c551f3d45a2 100644 --- a/paddle/fluid/inference/tensorrt/convert/fc_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/fc_op.cc @@ -158,26 +158,26 @@ class FcOpConverter : public OpConverter { auto* Y_t = Y_v->GetMutable(); int x_num_col_dims = op_desc.HasAttr("x_num_col_dims") - ? BOOST_GET_CONST(int, op_desc.GetAttr("x_num_col_dims")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("x_num_col_dims")) : (op_desc.HasAttr("in_num_col_dims") - ? BOOST_GET_CONST(int, op_desc.GetAttr("in_num_col_dims")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("in_num_col_dims")) : 1); const std::string activation_type = op_desc.HasAttr("activation_type") - ? BOOST_GET_CONST(std::string, op_desc.GetAttr("activation_type")) + ? PADDLE_GET_CONST(std::string, op_desc.GetAttr("activation_type")) : ""; bool enable_int8 = op_desc.HasAttr("enable_int8"); bool support_int8 = false; if (op_desc.HasAttr("support_int8")) { - support_int8 = BOOST_GET_CONST(bool, op_desc.GetAttr("support_int8")); + support_int8 = PADDLE_GET_CONST(bool, op_desc.GetAttr("support_int8")); } float in_scale = 0; if (enable_int8 || support_int8) { if (enable_int8) { - in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); } else { - in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("X")); + in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("X")); } engine_->SetTensorDynamicRange(X, in_scale); } @@ -204,9 +204,9 @@ class FcOpConverter : public OpConverter { true, platform::errors::InvalidArgument( "must have out threshold in fc layers in int8 mode")); - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); } else { - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Out")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Out")); } nvinfer1::DimsHW nv_ksize(1, 1); auto* fc_layer_int8 = TRT_ENGINE_ADD_LAYER(engine_, @@ -280,7 +280,7 @@ class FcOpConverter : public OpConverter { bool transpose_y = false; if (op_desc.HasAttr("transpose_Y")) { - transpose_y = BOOST_GET_CONST(bool, op_desc.GetAttr("transpose_Y")); + transpose_y = PADDLE_GET_CONST(bool, op_desc.GetAttr("transpose_Y")); } int weight_w, weight_h; auto weight = engine_->GetTrtWeight(op_desc.Input(w_name).front(), *Y_t); @@ -358,9 +358,9 @@ class FcOpConverter : public OpConverter { float out_scale = 0; if (enable_int8) { out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); } else { - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Out")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Out")); } engine_->SetTensorDynamicRange(fc_layer_int8->getOutput(0), out_scale); diff --git a/paddle/fluid/inference/tensorrt/convert/flatten_contiguous_range_op.cc b/paddle/fluid/inference/tensorrt/convert/flatten_contiguous_range_op.cc index 4a7ccf02182dd7..9caa083deec769 100644 --- a/paddle/fluid/inference/tensorrt/convert/flatten_contiguous_range_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/flatten_contiguous_range_op.cc @@ -37,8 +37,8 @@ class FlattenContiguousRangeOpConverter : public OpConverter { auto* input = engine_->GetITensor(op_desc.Input("X")[0]); const auto input_dim = input->getDimensions(); const int dims = input_dim.nbDims; - int start_axis = BOOST_GET_CONST(int, op_desc.GetAttr("start_axis")); 
- int stop_axis = BOOST_GET_CONST(int, op_desc.GetAttr("stop_axis")); + int start_axis = PADDLE_GET_CONST(int, op_desc.GetAttr("start_axis")); + int stop_axis = PADDLE_GET_CONST(int, op_desc.GetAttr("stop_axis")); nvinfer1::IShuffleLayer* layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *input); diff --git a/paddle/fluid/inference/tensorrt/convert/fused_token_prune_op.cc b/paddle/fluid/inference/tensorrt/convert/fused_token_prune_op.cc index bab04ac16aac9d..92a74e65adb931 100644 --- a/paddle/fluid/inference/tensorrt/convert/fused_token_prune_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/fused_token_prune_op.cc @@ -30,11 +30,12 @@ class FusedTokenPruneOpConverter : public OpConverter { auto* NewMask = engine_->GetITensor(op_desc.Input("NewMask").front()); bool keep_first_token = op_desc.HasAttr("keep_first_token") - ? BOOST_GET_CONST(bool, op_desc.GetAttr("keep_first_token")) + ? PADDLE_GET_CONST(bool, op_desc.GetAttr("keep_first_token")) : true; - bool keep_order = op_desc.HasAttr("keep_order") - ? BOOST_GET_CONST(bool, op_desc.GetAttr("keep_order")) - : false; + bool keep_order = + op_desc.HasAttr("keep_order") + ? PADDLE_GET_CONST(bool, op_desc.GetAttr("keep_order")) + : false; std::vector itensors = {Attn, X, Mask, NewMask}; diff --git a/paddle/fluid/inference/tensorrt/convert/gather_op.cc b/paddle/fluid/inference/tensorrt/convert/gather_op.cc index 3e9bd987441d12..8bd190771f668b 100644 --- a/paddle/fluid/inference/tensorrt/convert/gather_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/gather_op.cc @@ -47,7 +47,7 @@ class GatherOpConverter : public OpConverter { int axis = 0; if (op_desc.HasAttr("axis")) { - axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis")); + axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis")); } auto reshape_layer = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *index_tensor); diff --git a/paddle/fluid/inference/tensorrt/convert/gelu_op.cc b/paddle/fluid/inference/tensorrt/convert/gelu_op.cc index 269e97b8dc270b..845e5c7d704ca4 100644 --- a/paddle/fluid/inference/tensorrt/convert/gelu_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/gelu_op.cc @@ -50,7 +50,7 @@ class GeluOpConverter : public OpConverter { nvinfer1::ILayer* layer = nullptr; if (op_desc.HasAttr("approximate") && - BOOST_GET_CONST(bool, op_desc.GetAttr("approximate"))) { + PADDLE_GET_CONST(bool, op_desc.GetAttr("approximate"))) { #if IS_TRT_VERSION_GE(7000) nvinfer1::Dims input_shape; input_shape.nbDims = input->getDimensions().nbDims; diff --git a/paddle/fluid/inference/tensorrt/convert/group_norm_op.cc b/paddle/fluid/inference/tensorrt/convert/group_norm_op.cc index 1b452644753540..275837ea6a77bf 100644 --- a/paddle/fluid/inference/tensorrt/convert/group_norm_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/group_norm_op.cc @@ -38,8 +38,8 @@ class GroupNormOpConverter : public OpConverter { auto* input_itensor = engine_->GetITensor(op_desc.Input("X").front()); - int groups = BOOST_GET_CONST(int, op_desc.GetAttr("groups")); - float epsilon = BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")); + int groups = PADDLE_GET_CONST(int, op_desc.GetAttr("groups")); + float epsilon = PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")); std::string scale_name = op_desc.Input("Scale").front(); std::string bias_name = op_desc.Input("Bias").front(); diff --git a/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc b/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc index ad89bb8df1d447..8d2f351c18ce92 100644 --- a/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc +++ 
b/paddle/fluid/inference/tensorrt/convert/hard_sigmoid_op.cc @@ -42,8 +42,8 @@ class HardSigmoidOpConverter : public OpConverter { framework::OpDesc op_desc(op, nullptr); // Declare inputs auto* input = engine_->GetITensor(op_desc.Input("X")[0]); - float slope = BOOST_GET_CONST(float, op_desc.GetAttr("slope")); - float offset = BOOST_GET_CONST(float, op_desc.GetAttr("offset")); + float slope = PADDLE_GET_CONST(float, op_desc.GetAttr("slope")); + float offset = PADDLE_GET_CONST(float, op_desc.GetAttr("offset")); auto* layer = TRT_ENGINE_ADD_LAYER( engine_, Activation, *input, nvinfer1::ActivationType::kHARD_SIGMOID); layer->setAlpha(slope); diff --git a/paddle/fluid/inference/tensorrt/convert/hard_swish_op.cc b/paddle/fluid/inference/tensorrt/convert/hard_swish_op.cc index 0c3b7651454ac2..add9e5638f6b2c 100644 --- a/paddle/fluid/inference/tensorrt/convert/hard_swish_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/hard_swish_op.cc @@ -46,14 +46,15 @@ class HardSwishOpConverter : public OpConverter { const float threshold = op_desc.HasAttr("threshold") - ? BOOST_GET_CONST(float, op_desc.GetAttr("threshold")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("threshold")) : 6.0f; const float scale = op_desc.HasAttr("scale") - ? BOOST_GET_CONST(float, op_desc.GetAttr("scale")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("scale")) : 6.0f; - const float offset = op_desc.HasAttr("offset") - ? BOOST_GET_CONST(float, op_desc.GetAttr("offset")) - : 3.0f; + const float offset = + op_desc.HasAttr("offset") + ? PADDLE_GET_CONST(float, op_desc.GetAttr("offset")) + : 3.0f; nvinfer1::ILayer* layer = nullptr; if (threshold == scale) { auto* hsig_layer = TRT_ENGINE_ADD_LAYER( diff --git a/paddle/fluid/inference/tensorrt/convert/instance_norm_op.cc b/paddle/fluid/inference/tensorrt/convert/instance_norm_op.cc index 6564de4d4bffc9..69308025f57bad 100644 --- a/paddle/fluid/inference/tensorrt/convert/instance_norm_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/instance_norm_op.cc @@ -42,7 +42,7 @@ class InstanceNormOpConverter : public OpConverter { framework::OpDesc op_desc(op, nullptr); auto* input = engine_->GetITensor(op_desc.Input("X")[0]); - float eps = BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")); + float eps = PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")); auto* scale_var = scope.FindVar(op_desc.Input("Scale")[0]); auto* bias_var = scope.FindVar(op_desc.Input("Bias")[0]); diff --git a/paddle/fluid/inference/tensorrt/convert/layer_norm_op.cc b/paddle/fluid/inference/tensorrt/convert/layer_norm_op.cc index c899f4f6e777e7..359ea2b343dca9 100644 --- a/paddle/fluid/inference/tensorrt/convert/layer_norm_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/layer_norm_op.cc @@ -32,10 +32,10 @@ class LayerNormOpConverter : public OpConverter { auto* Scale_v = scope.FindVar(op_desc.Input("Scale").front()); const int begin_norm_axis = op_desc.HasAttr("begin_norm_axis") - ? BOOST_GET_CONST(int, op_desc.GetAttr("begin_norm_axis")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("begin_norm_axis")) : 1; const float eps = op_desc.HasAttr("epsilon") - ? BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")) + ? 
PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")) : 1e-5f; PADDLE_ENFORCE_NOT_NULL( Bias_v, diff --git a/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc b/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc index 8a2660cff7622c..8d4928a7b9c133 100644 --- a/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/leaky_relu_op.cc @@ -39,7 +39,7 @@ class LeakyReluOpConverter : public OpConverter { framework::OpDesc op_desc(op, nullptr); auto* input = engine_->GetITensor(op_desc.Input("X")[0]); // Get attrs - float alpha = BOOST_GET_CONST(float, op_desc.GetAttr("alpha")); + float alpha = PADDLE_GET_CONST(float, op_desc.GetAttr("alpha")); nvinfer1::ILayer* output_layer = nullptr; #if IS_TRT_VERSION_GE(5100) @@ -51,7 +51,7 @@ class LeakyReluOpConverter : public OpConverter { bool enable_int8 = op_desc.HasAttr("enable_int8"); if (enable_int8) { CHECK(op_desc.HasAttr("Input_scale")); - float in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + float in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); engine_->SetTensorDynamicRange(input, in_scale); } #else diff --git a/paddle/fluid/inference/tensorrt/convert/matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/matmul_op.cc index d887044829d76a..6752bf1d497688 100644 --- a/paddle/fluid/inference/tensorrt/convert/matmul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/matmul_op.cc @@ -48,13 +48,13 @@ class MatMulOpConverter : public OpConverter { nvinfer1::Dims dims_x = input1->getDimensions(); nvinfer1::Dims dims_y = input2->getDimensions(); - bool transpose_X = BOOST_GET_CONST(bool, op_desc.GetAttr("transpose_X")); - bool transpose_Y = BOOST_GET_CONST(bool, op_desc.GetAttr("transpose_Y")); + bool transpose_X = PADDLE_GET_CONST(bool, op_desc.GetAttr("transpose_X")); + bool transpose_Y = PADDLE_GET_CONST(bool, op_desc.GetAttr("transpose_Y")); auto output_name = op_desc.Output("Out")[0]; float alpha = 1; if (op_desc.HasAttr("alpha")) { - float alpha_tem = BOOST_GET_CONST(float, op_desc.GetAttr("alpha")); + float alpha_tem = PADDLE_GET_CONST(float, op_desc.GetAttr("alpha")); alpha = alpha_tem; } nvinfer1::MatrixOperation matrix_operation_X = @@ -65,7 +65,7 @@ class MatMulOpConverter : public OpConverter { : nvinfer1::MatrixOperation::kNONE; if (op_desc.HasAttr("support_int8") && - BOOST_GET_CONST(bool, op_desc.GetAttr("support_int8")) && + PADDLE_GET_CONST(bool, op_desc.GetAttr("support_int8")) && engine_->precision() == AnalysisConfig::Precision::kInt8 && platform::GetGPUComputeCapability(0) >= 75) { if (engine_->with_dynamic_shape()) { diff --git a/paddle/fluid/inference/tensorrt/convert/mish_op.cc b/paddle/fluid/inference/tensorrt/convert/mish_op.cc index 3a9b31c33bb11b..4bcd349055bc4d 100644 --- a/paddle/fluid/inference/tensorrt/convert/mish_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/mish_op.cc @@ -46,7 +46,7 @@ class MishOpConverter : public OpConverter { const float threshold = op_desc.HasAttr("threshold") - ? BOOST_GET_CONST(float, op_desc.GetAttr("threshold")) + ? 
PADDLE_GET_CONST(float, op_desc.GetAttr("threshold")) : 20.0f; nvinfer1::ILayer* layer = nullptr; diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc index 73d5f661d12915..53e8ffb1c0ffba 100644 --- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms3_op.cc @@ -46,14 +46,14 @@ class MultiClassNMS3OpConverter : public OpConverter { auto* scores_tensor = engine_->GetITensor(scores); int background_label = - BOOST_GET_CONST(int, op_desc.GetAttr("background_label")); + PADDLE_GET_CONST(int, op_desc.GetAttr("background_label")); float score_threshold = - BOOST_GET_CONST(float, op_desc.GetAttr("score_threshold")); - int nms_top_k = BOOST_GET_CONST(int, op_desc.GetAttr("nms_top_k")); + PADDLE_GET_CONST(float, op_desc.GetAttr("score_threshold")); + int nms_top_k = PADDLE_GET_CONST(int, op_desc.GetAttr("nms_top_k")); float nms_threshold = - BOOST_GET_CONST(float, op_desc.GetAttr("nms_threshold")); - int keep_top_k = BOOST_GET_CONST(int, op_desc.GetAttr("keep_top_k")); - bool normalized = BOOST_GET_CONST(bool, op_desc.GetAttr("normalized")); + PADDLE_GET_CONST(float, op_desc.GetAttr("nms_threshold")); + int keep_top_k = PADDLE_GET_CONST(int, op_desc.GetAttr("keep_top_k")); + bool normalized = PADDLE_GET_CONST(bool, op_desc.GetAttr("normalized")); int num_classes = scores_tensor->getDimensions().d[0]; auto bboxes_dims = bboxes_tensor->getDimensions(); diff --git a/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc b/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc index eaeb7279c34dae..1266b1b621d56d 100644 --- a/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/multiclass_nms_op.cc @@ -44,14 +44,14 @@ class MultiClassNMSOpConverter : public OpConverter { auto* scores_tensor = engine_->GetITensor(scores); int background_label = - BOOST_GET_CONST(int, op_desc.GetAttr("background_label")); + PADDLE_GET_CONST(int, op_desc.GetAttr("background_label")); float score_threshold = - BOOST_GET_CONST(float, op_desc.GetAttr("score_threshold")); - int nms_top_k = BOOST_GET_CONST(int, op_desc.GetAttr("nms_top_k")); + PADDLE_GET_CONST(float, op_desc.GetAttr("score_threshold")); + int nms_top_k = PADDLE_GET_CONST(int, op_desc.GetAttr("nms_top_k")); float nms_threshold = - BOOST_GET_CONST(float, op_desc.GetAttr("nms_threshold")); - int keep_top_k = BOOST_GET_CONST(int, op_desc.GetAttr("keep_top_k")); - bool normalized = BOOST_GET_CONST(bool, op_desc.GetAttr("normalized")); + PADDLE_GET_CONST(float, op_desc.GetAttr("nms_threshold")); + int keep_top_k = PADDLE_GET_CONST(int, op_desc.GetAttr("keep_top_k")); + bool normalized = PADDLE_GET_CONST(bool, op_desc.GetAttr("normalized")); int num_classes = scores_tensor->getDimensions().d[0]; auto bboxes_dims = bboxes_tensor->getDimensions(); diff --git a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc index 8bc44cc6ab9d2a..5579844e1acf86 100644 --- a/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/multihead_matmul_op.cc @@ -45,7 +45,7 @@ class MultiheadMatMulOpConverter : public OpConverter { float in_scale = 0.; if (op_desc.HasAttr("Input_scale")) { - in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); 
engine_->SetTensorDynamicRange(input, in_scale); } weight_data = const_cast(static_cast( @@ -75,7 +75,7 @@ class MultiheadMatMulOpConverter : public OpConverter { }; tranpose_weight(weight_data_tmp.data(), weight_data, m, n); - int head_number = BOOST_GET_CONST(int, op_desc.GetAttr("head_number")); + int head_number = PADDLE_GET_CONST(int, op_desc.GetAttr("head_number")); nvinfer1::ILayer* layer = nullptr; auto output_name = op_desc.Output("Out")[0]; @@ -116,11 +116,11 @@ class MultiheadMatMulOpConverter : public OpConverter { platform::errors::InvalidArgument( "must have out_threshold in multihead layers in int8 mode")); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); engine_->SetTensorDynamicRange(fc_layer->getOutput(0), out_scale); if (qkv2context_plugin_int8) { dp_probs = - BOOST_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; + PADDLE_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; } auto creator = GetPluginRegistry()->getPluginCreator( "CustomQKVToContextPluginDynamic", "3"); @@ -249,11 +249,11 @@ class MultiheadMatMulOpConverter : public OpConverter { "must have out threshold in multihead layers " "in int8 mode")); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); engine_->SetTensorDynamicRange(fc_layer->getOutput(0), out_scale); if (qkv2context_plugin_int8) { dp_probs = - BOOST_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; + PADDLE_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; } } auto creator = GetPluginRegistry()->getPluginCreator( @@ -391,7 +391,7 @@ class MultiheadMatMulOpConverter : public OpConverter { platform::errors::InvalidArgument( "must have out threshold in multihead layers in int8 mode")); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); engine_->SetTensorDynamicRange(fc_layer->getOutput(0), out_scale); } fc_layer->setName( @@ -402,7 +402,7 @@ class MultiheadMatMulOpConverter : public OpConverter { // add qkv to context int head_size = hidden_out / head_number; - float scale = BOOST_GET_CONST(float, op_desc.GetAttr("alpha")); + float scale = PADDLE_GET_CONST(float, op_desc.GetAttr("alpha")); std::vector plugin_inputs; plugin_inputs.push_back(fc_layer->getOutput(0)); diff --git a/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc b/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc index 18c300af930b62..361d719238c221 100644 --- a/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/nearest_interp_op.cc @@ -41,17 +41,17 @@ class NearestInterpolateOpConverter : public OpConverter { auto data_layout = !op_desc.HasAttr("data_layout") ? 
framework::DataLayout::kNCHW - : framework::StringToDataLayout(BOOST_GET_CONST( + : framework::StringToDataLayout(PADDLE_GET_CONST( std::string, op_desc.GetAttr("data_layout"))); auto interp_method = - BOOST_GET_CONST(std::string, op_desc.GetAttr("interp_method")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("interp_method")); bool align_corners = - BOOST_GET_CONST(bool, op_desc.GetAttr("align_corners")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("align_corners")); auto input_names = op_desc.Input("X"); - auto scale = BOOST_GET_CONST(float, op_desc.GetAttr("scale")); - auto out_h = BOOST_GET_CONST(int, op_desc.GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, op_desc.GetAttr("out_w")); + auto scale = PADDLE_GET_CONST(float, op_desc.GetAttr("scale")); + auto out_h = PADDLE_GET_CONST(int, op_desc.GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, op_desc.GetAttr("out_w")); auto layer = TRT_ENGINE_ADD_LAYER(engine_, Resize, *input); layer->setAlignCorners(align_corners); diff --git a/paddle/fluid/inference/tensorrt/convert/nearest_interp_v2_op.cc b/paddle/fluid/inference/tensorrt/convert/nearest_interp_v2_op.cc index fb790a1ba82e0e..c74dfe58864002 100644 --- a/paddle/fluid/inference/tensorrt/convert/nearest_interp_v2_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/nearest_interp_v2_op.cc @@ -40,16 +40,16 @@ class NearestInterpolateV2OpConverter : public OpConverter { auto input = engine_->GetITensor(input_name); auto data_layout = framework::StringToDataLayout( - BOOST_GET_CONST(std::string, op_desc.GetAttr("data_layout"))); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("data_layout"))); auto interp_method = - BOOST_GET_CONST(std::string, op_desc.GetAttr("interp_method")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("interp_method")); bool align_corners = - BOOST_GET_CONST(bool, op_desc.GetAttr("align_corners")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("align_corners")); auto input_names = op_desc.Input("X"); - auto scale = BOOST_GET_CONST(std::vector, op_desc.GetAttr("scale")); - auto out_h = BOOST_GET_CONST(int, op_desc.GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, op_desc.GetAttr("out_w")); + auto scale = PADDLE_GET_CONST(std::vector, op_desc.GetAttr("scale")); + auto out_h = PADDLE_GET_CONST(int, op_desc.GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, op_desc.GetAttr("out_w")); auto layer = TRT_ENGINE_ADD_LAYER(engine_, Resize, *input); layer->setAlignCorners(align_corners); diff --git a/paddle/fluid/inference/tensorrt/convert/op_converter.h b/paddle/fluid/inference/tensorrt/convert/op_converter.h index 0eb2bc0875fdfb..5b442c763c02c3 100644 --- a/paddle/fluid/inference/tensorrt/convert/op_converter.h +++ b/paddle/fluid/inference/tensorrt/convert/op_converter.h @@ -160,7 +160,7 @@ class OpConverter { // only one out settensordynamicRange if (op_desc.HasAttr("out_threshold")) { float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); std::string output_name = ""; if (op_desc.HasOutput("Output")) { output_name = op_desc.Output("Output").front(); @@ -183,7 +183,7 @@ class OpConverter { // outs settensordynamicRange for (size_t i = 0; i < output_num; ++i) { if (op_desc.HasAttr("out_" + std::to_string(i) + "_threshold")) { - float out_scale = BOOST_GET_CONST( + float out_scale = PADDLE_GET_CONST( float, op_desc.GetAttr("out_" + std::to_string(i) + "_threshold")); std::string output_name = op_desc.Output(op_desc.OutputNames()[i]).front(); @@ -204,7 +204,7 @@ class 
OpConverter { std::string input_tensor_name = op_desc.Input(inputs_name[i])[0]; auto* input_itensor = engine->GetITensor(input_tensor_name); float input_scale = - BOOST_GET_CONST(float, op_desc.GetAttr(inputs_name[i])); + PADDLE_GET_CONST(float, op_desc.GetAttr(inputs_name[i])); engine->SetTensorDynamicRange(input_itensor, input_scale); VLOG(1) << "Set input tensor scale = " << input_scale << " for tensor: " << input_tensor_name << "."; @@ -215,7 +215,7 @@ class OpConverter { std::string output_tensor_name = op_desc.Output(outputs_name[i])[0]; auto* output_itensor = engine->GetITensor(output_tensor_name); float output_scale = - BOOST_GET_CONST(float, op_desc.GetAttr(outputs_name[i])); + PADDLE_GET_CONST(float, op_desc.GetAttr(outputs_name[i])); engine->SetTensorDynamicRange(output_itensor, output_scale); VLOG(1) << "Set output tensor scale = " << output_scale << " for tensor: " << output_tensor_name << "."; diff --git a/paddle/fluid/inference/tensorrt/convert/pad_op.cc b/paddle/fluid/inference/tensorrt/convert/pad_op.cc index 63d31d0c1723d2..cb5e6aeb17f7ed 100644 --- a/paddle/fluid/inference/tensorrt/convert/pad_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/pad_op.cc @@ -43,7 +43,7 @@ class PadOpConverter : public OpConverter { auto* input = engine_->GetITensor(op_desc.Input("X")[0]); const std::vector paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); int pad_size = static_cast(paddings.size()); diff --git a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc index 543d7742324750..f9bba14e108bad 100644 --- a/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/pool2d_op.cc @@ -75,26 +75,26 @@ class Pool2dOpConverter : public OpConverter { int input_dims = input_shape.nbDims; bool global_pooling = - BOOST_GET_CONST(bool, op_desc.GetAttr("global_pooling")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("global_pooling")); std::string pool_type = - BOOST_GET_CONST(std::string, op_desc.GetAttr("pooling_type")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("pooling_type")); std::vector ksize = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("ksize")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("ksize")); std::vector strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); std::vector paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); bool exclusive = op_desc.HasAttr("exclusive") - ? BOOST_GET_CONST(bool, op_desc.GetAttr("exclusive")) + ? 
PADDLE_GET_CONST(bool, op_desc.GetAttr("exclusive")) : true; - bool ceil_mode = BOOST_GET_CONST(bool, op_desc.GetAttr("ceil_mode")); + bool ceil_mode = PADDLE_GET_CONST(bool, op_desc.GetAttr("ceil_mode")); bool adaptive = false; if (op_desc.HasAttr("adaptive")) - adaptive = BOOST_GET_CONST(bool, op_desc.GetAttr("adaptive")); + adaptive = PADDLE_GET_CONST(bool, op_desc.GetAttr("adaptive")); std::string padding_algorithm = "EXPLICIT"; if (op_desc.HasAttr("padding_algorithm")) padding_algorithm = - BOOST_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); nvinfer1::PoolingType nv_pool_type = nvinfer1::PoolingType::kMAX; nvinfer1::ReduceOperation reduce_operation = @@ -138,7 +138,7 @@ class Pool2dOpConverter : public OpConverter { if (op_desc.HasAttr("enable_int8")) { CHECK(op_desc.HasAttr("Input_scale")); float input_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); engine_->SetTensorDynamicRange(input1, input_scale); } diff --git a/paddle/fluid/inference/tensorrt/convert/pool3d_op.cc b/paddle/fluid/inference/tensorrt/convert/pool3d_op.cc index 80af56a356a23e..2ce0da4f840134 100644 --- a/paddle/fluid/inference/tensorrt/convert/pool3d_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/pool3d_op.cc @@ -84,26 +84,26 @@ class Pool3dOpConverter : public OpConverter { int input_dims = input_shape.nbDims; bool global_pooling = - BOOST_GET_CONST(bool, op_desc.GetAttr("global_pooling")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("global_pooling")); std::string pool_type = - BOOST_GET_CONST(std::string, op_desc.GetAttr("pooling_type")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("pooling_type")); std::vector ksize = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("ksize")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("ksize")); std::vector strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); std::vector paddings = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("paddings")); bool exclusive = op_desc.HasAttr("exclusive") - ? BOOST_GET_CONST(bool, op_desc.GetAttr("exclusive")) + ? 
PADDLE_GET_CONST(bool, op_desc.GetAttr("exclusive")) : true; - bool ceil_mode = BOOST_GET_CONST(bool, op_desc.GetAttr("ceil_mode")); + bool ceil_mode = PADDLE_GET_CONST(bool, op_desc.GetAttr("ceil_mode")); bool adaptive = false; if (op_desc.HasAttr("adaptive")) - adaptive = BOOST_GET_CONST(bool, op_desc.GetAttr("adaptive")); + adaptive = PADDLE_GET_CONST(bool, op_desc.GetAttr("adaptive")); std::string padding_algorithm = "EXPLICIT"; if (op_desc.HasAttr("padding_algorithm")) padding_algorithm = - BOOST_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("padding_algorithm")); if (padding_algorithm == "VALID" || padding_algorithm == "SAME") { std::fill(paddings.begin(), paddings.end(), 0); } @@ -129,7 +129,7 @@ class Pool3dOpConverter : public OpConverter { if (op_desc.HasAttr("enable_int8")) { CHECK(op_desc.HasAttr("Input_scale")); float input_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); engine_->SetTensorDynamicRange(input1, input_scale); } diff --git a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc index 5bfa1170fa1091..38a811b8f521b1 100644 --- a/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc +++ b/paddle/fluid/inference/tensorrt/convert/preln_emb_eltwise_layernorm.cc @@ -184,9 +184,9 @@ class PrelnEmbEltwiseLayerNormOpConverter : public OpConverter { .c_str()); free(plugin_ptr); float out_0_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_0_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_0_threshold")); float out_1_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_1_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_1_threshold")); engine_->SetTensorDynamicRange(plugin_layer->getOutput(0), out_0_scale); engine_->SetTensorDynamicRange(plugin_layer->getOutput(1), out_1_scale); diff --git a/paddle/fluid/inference/tensorrt/convert/preln_residual_bias.cc b/paddle/fluid/inference/tensorrt/convert/preln_residual_bias.cc index 7b89b62dc8b66c..abab96ea3b601c 100644 --- a/paddle/fluid/inference/tensorrt/convert/preln_residual_bias.cc +++ b/paddle/fluid/inference/tensorrt/convert/preln_residual_bias.cc @@ -57,7 +57,7 @@ class PrelnResidualBiasOpConverter : public OpConverter { int scale_size = phi::product(scale_dims); int ele_bias_size = phi::product(ele_bias_dims); - float epsilon = BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")); + float epsilon = PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")); bool with_fp16 = engine_->WithFp16() && !engine_->disable_trt_plugin_fp16(); if (engine_->precision() == AnalysisConfig::Precision::kInt8) { with_fp16 = true; diff --git a/paddle/fluid/inference/tensorrt/convert/prelu_op.cc b/paddle/fluid/inference/tensorrt/convert/prelu_op.cc index 38b01eff6fb198..68f90345706b5e 100644 --- a/paddle/fluid/inference/tensorrt/convert/prelu_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/prelu_op.cc @@ -34,11 +34,11 @@ class PReluOpConverter : public OpConverter { size_t input_num = op_desc.Input("X").size(); auto* input = engine_->GetITensor(op_desc.Input("X")[0]); // Get attrs - std::string mode = BOOST_GET_CONST(std::string, op_desc.GetAttr("mode")); + std::string mode = PADDLE_GET_CONST(std::string, op_desc.GetAttr("mode")); std::string data_format = "NCHW"; if (op_desc.HasAttr("data_format")) { data_format = - BOOST_GET_CONST(std::string, op_desc.GetAttr("data_format")); 
+ PADDLE_GET_CONST(std::string, op_desc.GetAttr("data_format")); } auto* alpha_var = scope.FindVar(op_desc.Input("Alpha")[0]); auto* alpha_tensor = alpha_var->GetMutable(); diff --git a/paddle/fluid/inference/tensorrt/convert/recover_padding_op.cc b/paddle/fluid/inference/tensorrt/convert/recover_padding_op.cc index ccd645fb36022b..04df9f6885f95b 100644 --- a/paddle/fluid/inference/tensorrt/convert/recover_padding_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/recover_padding_op.cc @@ -62,7 +62,7 @@ class RecoverPadding : public OpConverter { output_name + ")") .c_str()); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); engine_->SetTensorDynamicRange(transpose_output, out_scale); plugin_inputs.push_back(transpose_output); } else { diff --git a/paddle/fluid/inference/tensorrt/convert/reduce_op.cc b/paddle/fluid/inference/tensorrt/convert/reduce_op.cc index 828e624b36f9e1..08399b5f437901 100644 --- a/paddle/fluid/inference/tensorrt/convert/reduce_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/reduce_op.cc @@ -53,10 +53,10 @@ class ReduceOpConverter : public OpConverter { nvinfer1::Dims input_shape = x->getDimensions(); int input_dims = input_shape.nbDims; - bool keep_dim = BOOST_GET_CONST(bool, op_desc.GetAttr("keep_dim")); + bool keep_dim = PADDLE_GET_CONST(bool, op_desc.GetAttr("keep_dim")); std::vector dim = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("dim")); - bool reduce_all = BOOST_GET_CONST(bool, op_desc.GetAttr("reduce_all")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("dim")); + bool reduce_all = PADDLE_GET_CONST(bool, op_desc.GetAttr("reduce_all")); nvinfer1::IReduceLayer* layer = nullptr; if (reduce_all) { diff --git a/paddle/fluid/inference/tensorrt/convert/remove_padding_op.cc b/paddle/fluid/inference/tensorrt/convert/remove_padding_op.cc index 85af4e15338fa6..cf955d45555d98 100644 --- a/paddle/fluid/inference/tensorrt/convert/remove_padding_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/remove_padding_op.cc @@ -63,7 +63,7 @@ class RemovePadding : public OpConverter { platform::errors::Fatal("use with_interleaved must be int8.")); } float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); engine_->SetTensorDynamicRange(layer->getOutput(0), out_scale); auto* transpose = TRT_ENGINE_ADD_LAYER(engine_, Shuffle, *(layer->getOutput(0))); diff --git a/paddle/fluid/inference/tensorrt/convert/reshape_op.cc b/paddle/fluid/inference/tensorrt/convert/reshape_op.cc index 00ee5503cc2e21..af25dbc8383002 100644 --- a/paddle/fluid/inference/tensorrt/convert/reshape_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/reshape_op.cc @@ -36,7 +36,7 @@ class ReshapeOpConverter : public OpConverter { // Declare inputs auto* input = engine_->GetITensor(op_desc.Input("X")[0]); std::vector shape = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("shape")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("shape")); int nbDims_num = shape.size(); nvinfer1::Dims reshape_dim; if (engine_->with_dynamic_shape()) { // running the TRT Dynamic Shape mode diff --git a/paddle/fluid/inference/tensorrt/convert/roi_align_op.cc b/paddle/fluid/inference/tensorrt/convert/roi_align_op.cc index 10f9850ee39d97..c2256a52fe7ec5 100644 --- a/paddle/fluid/inference/tensorrt/convert/roi_align_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/roi_align_op.cc @@ -45,14 +45,14 @@ class RoiAlignOpConverter : public OpConverter { std::string 
output_name = op_desc.Output("Out").front(); const auto pooled_height = - BOOST_GET_CONST(int, op_desc.GetAttr("pooled_height")); + PADDLE_GET_CONST(int, op_desc.GetAttr("pooled_height")); const auto pooled_width = - BOOST_GET_CONST(int, op_desc.GetAttr("pooled_width")); + PADDLE_GET_CONST(int, op_desc.GetAttr("pooled_width")); const auto spatial_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("spatial_scale")); + PADDLE_GET_CONST(float, op_desc.GetAttr("spatial_scale")); const auto sampling_ratio = - BOOST_GET_CONST(int, op_desc.GetAttr("sampling_ratio")); - const auto aligned = BOOST_GET_CONST(bool, op_desc.GetAttr("aligned")); + PADDLE_GET_CONST(int, op_desc.GetAttr("sampling_ratio")); + const auto aligned = PADDLE_GET_CONST(bool, op_desc.GetAttr("aligned")); const auto input_tensor = engine_->GetITensor(input_name); const auto rois_tensor = engine_->GetITensor(rois_name); diff --git a/paddle/fluid/inference/tensorrt/convert/roll_op.cc b/paddle/fluid/inference/tensorrt/convert/roll_op.cc index 606b6a0c30e685..df320c0abcdb65 100644 --- a/paddle/fluid/inference/tensorrt/convert/roll_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/roll_op.cc @@ -42,9 +42,9 @@ class RollOpConverter : public OpConverter { nvinfer1::Dims input_dims = input->getDimensions(); std::vector axis = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("axis")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("axis")); std::vector shifts = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("shifts")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("shifts")); nvinfer1::Dims start; start.nbDims = input_dims.nbDims; diff --git a/paddle/fluid/inference/tensorrt/convert/scale_op.cc b/paddle/fluid/inference/tensorrt/convert/scale_op.cc index 6ba8ba4d5cb4f4..9b0798d9f354fb 100644 --- a/paddle/fluid/inference/tensorrt/convert/scale_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/scale_op.cc @@ -46,9 +46,9 @@ class ScaleOpConverter : public OpConverter { auto input = engine_->GetITensor(input_name); bool bias_after_scale = - BOOST_GET_CONST(bool, op_desc.GetAttr("bias_after_scale")); - float bias = BOOST_GET_CONST(float, op_desc.GetAttr("bias")); - float scale = BOOST_GET_CONST(float, op_desc.GetAttr("scale")); + PADDLE_GET_CONST(bool, op_desc.GetAttr("bias_after_scale")); + float bias = PADDLE_GET_CONST(float, op_desc.GetAttr("bias")); + float scale = PADDLE_GET_CONST(float, op_desc.GetAttr("scale")); auto create_weights = [&](float data, std::string type) -> float* { std::unique_ptr tmp_tensor(new framework::Tensor()); tmp_tensor->Resize({1}); diff --git a/paddle/fluid/inference/tensorrt/convert/shuffle_channel_op.cc b/paddle/fluid/inference/tensorrt/convert/shuffle_channel_op.cc index fea29b2e687c9c..16264e82cf16a9 100644 --- a/paddle/fluid/inference/tensorrt/convert/shuffle_channel_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/shuffle_channel_op.cc @@ -41,7 +41,7 @@ class ShuffleChannelOpConverter : public OpConverter { auto* input = engine_->GetITensor(op_desc.Input("X")[0]); auto input_dims = input->getDimensions(); auto output_name = op_desc.Output("Out")[0]; - int group = BOOST_GET_CONST(int, op_desc.GetAttr("group")); + int group = PADDLE_GET_CONST(int, op_desc.GetAttr("group")); #if IS_TRT_VERSION_GE(8000) if (engine_->with_dynamic_shape()) { diff --git a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc index cf95a4d9b55e0e..25a6861eb67c24 100644 --- a/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc +++ 
b/paddle/fluid/inference/tensorrt/convert/skip_layernorm.cc @@ -162,7 +162,7 @@ class SkipLayerNormOpConverter : public OpConverter { auto bias_weight = GetFp32Weight("Bias").get(); auto scale_weight = GetFp32Weight("Scale").get(); - float eps = BOOST_GET_CONST(float, op_desc.GetAttr("epsilon")); + float eps = PADDLE_GET_CONST(float, op_desc.GetAttr("epsilon")); bool with_fp16 = engine_->WithFp16() && !engine_->disable_trt_plugin_fp16(); plugin::SkipLayerNormPluginDynamic* plugin = diff --git a/paddle/fluid/inference/tensorrt/convert/slice_op.cc b/paddle/fluid/inference/tensorrt/convert/slice_op.cc index 4f85e4f07cc4e1..3b663ba5a61779 100644 --- a/paddle/fluid/inference/tensorrt/convert/slice_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/slice_op.cc @@ -31,18 +31,18 @@ class SliceOpConverter : public OpConverter { float out_scale = 1; if (op_desc.HasAttr("out_threshold")) { - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); engine_->SetTensorDynamicRange(input, out_scale); } std::vector axes = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("axes")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("axes")); std::vector starts = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("starts")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("starts")); std::vector ends = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("ends")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("ends")); std::vector decrease_axises = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("decrease_axis")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("decrease_axis")); auto input_dims = input->getDimensions(); if (!engine_->with_dynamic_shape()) { diff --git a/paddle/fluid/inference/tensorrt/convert/softmax_op.cc b/paddle/fluid/inference/tensorrt/convert/softmax_op.cc index ada407c843172d..ccdc9097f6cc54 100644 --- a/paddle/fluid/inference/tensorrt/convert/softmax_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/softmax_op.cc @@ -45,7 +45,7 @@ class SoftMaxOpConverter : public OpConverter { nvinfer1::Dims input_shape = input1->getDimensions(); int input_dims = input_shape.nbDims; int axis = op_desc.HasAttr("axis") - ? BOOST_GET_CONST(int, op_desc.GetAttr("axis")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("axis")) : -1; auto* layer = TRT_ENGINE_ADD_LAYER( diff --git a/paddle/fluid/inference/tensorrt/convert/sparse_fc_op.cc b/paddle/fluid/inference/tensorrt/convert/sparse_fc_op.cc index 33801e969172a2..658d7172176438 100644 --- a/paddle/fluid/inference/tensorrt/convert/sparse_fc_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/sparse_fc_op.cc @@ -130,27 +130,27 @@ class SparseFcOpConverter : public OpConverter { auto* Y_t = Y_v->GetMutable(); int x_num_col_dims = op_desc.HasAttr("x_num_col_dims") - ? BOOST_GET_CONST(int, op_desc.GetAttr("x_num_col_dims")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("x_num_col_dims")) : (op_desc.HasAttr("in_num_col_dims") - ? BOOST_GET_CONST(int, op_desc.GetAttr("in_num_col_dims")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("in_num_col_dims")) : 1); const std::string activation_type = op_desc.HasAttr("activation_type") - ? BOOST_GET_CONST(std::string, op_desc.GetAttr("activation_type")) + ? 
PADDLE_GET_CONST(std::string, op_desc.GetAttr("activation_type")) : ""; float* weight_data = nullptr; bool enable_int8 = op_desc.HasAttr("enable_int8"); bool support_int8 = false; if (op_desc.HasAttr("support_int8")) { - support_int8 = BOOST_GET_CONST(bool, op_desc.GetAttr("support_int8")); + support_int8 = PADDLE_GET_CONST(bool, op_desc.GetAttr("support_int8")); } float in_scale = 0; if (enable_int8 || support_int8) { if (enable_int8) { - in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); } else { // attr X is generated by add_support_int8_pass - in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("X")); + in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("X")); } engine_->SetTensorDynamicRange(X, in_scale); } @@ -202,9 +202,9 @@ class SparseFcOpConverter : public OpConverter { float out_scale = 0; if (enable_int8) { out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); } else { - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Out")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Out")); } engine_->SetTensorDynamicRange(fc_layer_int8->getOutput(0), out_scale); @@ -258,9 +258,9 @@ class SparseFcOpConverter : public OpConverter { true, platform::errors::InvalidArgument( "must have out threshold in sparse_fc layers in int8 mode")); - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); } else { - out_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Out")); + out_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Out")); } plugin::SpmmPluginDynamic* plugin = new_spmm_plugin( weight, bias, activation_type, nvinfer1::DataType::kINT8, n); @@ -304,7 +304,7 @@ class SparseFcOpConverter : public OpConverter { bool transpose_y = false; if (op_desc.HasAttr("transpose_Y")) { - transpose_y = BOOST_GET_CONST(bool, op_desc.GetAttr("transpose_Y")); + transpose_y = PADDLE_GET_CONST(bool, op_desc.GetAttr("transpose_Y")); } int weight_w, weight_h; if (!transpose_y) { diff --git a/paddle/fluid/inference/tensorrt/convert/sparse_multihead_matmul_op.cc b/paddle/fluid/inference/tensorrt/convert/sparse_multihead_matmul_op.cc index 4a8d15ef0dbace..086cae495b522f 100644 --- a/paddle/fluid/inference/tensorrt/convert/sparse_multihead_matmul_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/sparse_multihead_matmul_op.cc @@ -61,7 +61,7 @@ class SparseMultiheadMatMulOpConverter : public OpConverter { float in_scale = 0.; if (op_desc.HasAttr("Input_scale")) { - in_scale = BOOST_GET_CONST(float, op_desc.GetAttr("Input_scale")); + in_scale = PADDLE_GET_CONST(float, op_desc.GetAttr("Input_scale")); engine_->SetTensorDynamicRange(input, in_scale); } weight_data = const_cast(static_cast( @@ -91,7 +91,7 @@ class SparseMultiheadMatMulOpConverter : public OpConverter { }; tranpose_weight(weight_data_tmp.data(), weight_data, m, n); - int head_number = BOOST_GET_CONST(int, op_desc.GetAttr("head_number")); + int head_number = PADDLE_GET_CONST(int, op_desc.GetAttr("head_number")); bool with_fp16 = engine_->WithFp16() && !engine_->disable_trt_plugin_fp16(); nvinfer1::ILayer* layer = nullptr; @@ -133,11 +133,11 @@ class SparseMultiheadMatMulOpConverter : public OpConverter { platform::errors::InvalidArgument( "must have out_threshold in multihead layers in int8 mode")); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); + 
PADDLE_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); engine_->SetTensorDynamicRange(fc_layer->getOutput(0), out_scale); if (qkv2context_plugin_int8) { dp_probs = - BOOST_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; + PADDLE_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; } auto creator = GetPluginRegistry()->getPluginCreator( "CustomQKVToContextPluginDynamic", "3"); @@ -266,11 +266,11 @@ class SparseMultiheadMatMulOpConverter : public OpConverter { "must have out threshold in multihead layers " "in int8 mode")); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); engine_->SetTensorDynamicRange(fc_layer->getOutput(0), out_scale); if (qkv2context_plugin_int8) { dp_probs = - BOOST_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; + PADDLE_GET_CONST(float, op_desc.GetAttr("dp_probs")) / 127.0; } } auto creator = GetPluginRegistry()->getPluginCreator( @@ -431,7 +431,7 @@ class SparseMultiheadMatMulOpConverter : public OpConverter { platform::errors::InvalidArgument( "must have out threshold in multihead layers in int8 mode")); float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("fc_out_threshold")); engine_->SetTensorDynamicRange(fc_layer->getOutput(0), out_scale); } fc_layer->setName( @@ -442,7 +442,7 @@ class SparseMultiheadMatMulOpConverter : public OpConverter { // add qkv to context int head_size = hidden_out / head_number; - float scale = BOOST_GET_CONST(float, op_desc.GetAttr("alpha")); + float scale = PADDLE_GET_CONST(float, op_desc.GetAttr("alpha")); std::vector plugin_inputs; plugin_inputs.push_back(fc_layer->getOutput(0)); diff --git a/paddle/fluid/inference/tensorrt/convert/split_op.cc b/paddle/fluid/inference/tensorrt/convert/split_op.cc index 856dddfa263e64..8ad4158bd04dd3 100644 --- a/paddle/fluid/inference/tensorrt/convert/split_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/split_op.cc @@ -33,13 +33,13 @@ class SplitOpConverter : public OpConverter { size_t output_num = op_desc.Output("Out").size(); // Get Attrs - int axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis")); std::vector output_lengths = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("sections")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("sections")); int num = 0; if (op_desc.HasAttr("num")) { - num = BOOST_GET_CONST(int, op_desc.GetAttr("num")); + num = PADDLE_GET_CONST(int, op_desc.GetAttr("num")); } nvinfer1::ITensor* shape_tensor = nullptr; if (engine_->with_dynamic_shape()) { diff --git a/paddle/fluid/inference/tensorrt/convert/squeeze2_op.cc b/paddle/fluid/inference/tensorrt/convert/squeeze2_op.cc index 9b494b0331880d..7ab370128bd62d 100644 --- a/paddle/fluid/inference/tensorrt/convert/squeeze2_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/squeeze2_op.cc @@ -33,7 +33,7 @@ class Squeeze2OpConverter : public OpConverter { // Get Attrs std::vector axes = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("axes")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("axes")); PADDLE_ENFORCE_GT( axes.size(), 0, diff --git a/paddle/fluid/inference/tensorrt/convert/stack_op.cc b/paddle/fluid/inference/tensorrt/convert/stack_op.cc index c38f73c6a311ef..e4d3003b53448b 100644 --- a/paddle/fluid/inference/tensorrt/convert/stack_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/stack_op.cc @@ -48,12 +48,12 @@ class StackOpConverter : public OpConverter { 
inputs[i] = engine_->GetITensor(input[i]); if (op_desc.HasAttr("out_threshold")) { float out_scale = - BOOST_GET_CONST(float, op_desc.GetAttr("out_threshold")); + PADDLE_GET_CONST(float, op_desc.GetAttr("out_threshold")); engine_->SetTensorDynamicRange(inputs[i], out_scale); } } - int axis = BOOST_GET_CONST(int, op_desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, op_desc.GetAttr("axis")); if (axis < 0) { axis = axis + inputs[0]->getDimensions().nbDims + 1; } diff --git a/paddle/fluid/inference/tensorrt/convert/strided_slice_op.cc b/paddle/fluid/inference/tensorrt/convert/strided_slice_op.cc index 21548bf348223c..cb67957c79cbf4 100644 --- a/paddle/fluid/inference/tensorrt/convert/strided_slice_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/strided_slice_op.cc @@ -42,13 +42,13 @@ class StridedSliceOpConverter : public OpConverter { nvinfer1::Dims input_dims = input->getDimensions(); auto output_name = op_desc.Output("Out")[0]; std::vector axes = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("axes")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("axes")); std::vector starts = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("starts")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("starts")); std::vector ends = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("ends")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("ends")); std::vector strides = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("strides")); int axes_size = axes.size(); nvinfer1::Dims start; nvinfer1::Dims stride; diff --git a/paddle/fluid/inference/tensorrt/convert/swish_op.cc b/paddle/fluid/inference/tensorrt/convert/swish_op.cc index 7cc93a3325cd4e..722b42bd895e43 100644 --- a/paddle/fluid/inference/tensorrt/convert/swish_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/swish_op.cc @@ -59,7 +59,7 @@ class SwishOpConverter : public OpConverter { "But received Out's size %u.", output_num)); // Get attrs - float beta = BOOST_GET_CONST(float, op_desc.GetAttr("beta")); + float beta = PADDLE_GET_CONST(float, op_desc.GetAttr("beta")); nvinfer1::ILayer* layer = nullptr; if (engine_->with_dynamic_shape()) { diff --git a/paddle/fluid/inference/tensorrt/convert/tile_op.cc b/paddle/fluid/inference/tensorrt/convert/tile_op.cc index b3d07f4f87061a..9d013240d37fdb 100644 --- a/paddle/fluid/inference/tensorrt/convert/tile_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/tile_op.cc @@ -40,7 +40,7 @@ class TileOpConverter : public OpConverter { auto* input = engine_->GetITensor(op_desc.Input("X")[0]); nvinfer1::Dims input_shape = input->getDimensions(); std::vector repeat_times = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("repeat_times")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("repeat_times")); nvinfer1::Dims output_dim = input_shape; nvinfer1::Dims output_stride; diff --git a/paddle/fluid/inference/tensorrt/convert/top_k_op.cc b/paddle/fluid/inference/tensorrt/convert/top_k_op.cc index 66f8b4611a790c..b5c383fd674a5b 100644 --- a/paddle/fluid/inference/tensorrt/convert/top_k_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/top_k_op.cc @@ -50,7 +50,7 @@ class TopKOpConverter : public OpConverter { auto* input_tensor = engine_->GetITensor(op_desc.Input("X")[0]); const int k = op_desc.HasAttr("k") - ? BOOST_GET_CONST(int, op_desc.GetAttr("k")) + ? 
PADDLE_GET_CONST(int, op_desc.GetAttr("k")) : 1.0f; nvinfer1::Dims input_dims = input_tensor->getDimensions(); @@ -83,14 +83,15 @@ class TopKv2OpConverter : public OpConverter { auto* input_tensor = engine_->GetITensor(op_desc.Input("X")[0]); const int k = op_desc.HasAttr("k") - ? BOOST_GET_CONST(int, op_desc.GetAttr("k")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("k")) : 1.0f; const int axis = op_desc.HasAttr("axis") - ? BOOST_GET_CONST(int, op_desc.GetAttr("axis")) + ? PADDLE_GET_CONST(int, op_desc.GetAttr("axis")) : 1.0f; - const bool largest = op_desc.HasAttr("largest") - ? BOOST_GET_CONST(bool, op_desc.GetAttr("largest")) - : true; + const bool largest = + op_desc.HasAttr("largest") + ? PADDLE_GET_CONST(bool, op_desc.GetAttr("largest")) + : true; auto flag = largest ? nvinfer1::TopKOperation::kMAX : nvinfer1::TopKOperation::kMIN; nvinfer1::ITopKLayer* layer = nullptr; diff --git a/paddle/fluid/inference/tensorrt/convert/transpose_op.cc b/paddle/fluid/inference/tensorrt/convert/transpose_op.cc index 19cc2981d85c73..4550bb938f54d3 100644 --- a/paddle/fluid/inference/tensorrt/convert/transpose_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/transpose_op.cc @@ -37,7 +37,7 @@ class TransposeOpConverter : public OpConverter { auto* input = engine_->GetITensor(op_desc.Input("X")[0]); int dims = input->getDimensions().nbDims; std::vector axis = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("axis")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("axis")); if (!engine_->with_dynamic_shape()) { for (size_t i = 1; i < axis.size(); i++) { axis[i]--; diff --git a/paddle/fluid/inference/tensorrt/convert/unsqueeze2_op.cc b/paddle/fluid/inference/tensorrt/convert/unsqueeze2_op.cc index eb25971534f79d..cbd6d37bfb2dae 100644 --- a/paddle/fluid/inference/tensorrt/convert/unsqueeze2_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/unsqueeze2_op.cc @@ -33,7 +33,7 @@ class Unsqueeze2OpConverter : public OpConverter { // Get Attrs std::vector axes = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("axes")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("axes")); PADDLE_ENFORCE_GT( axes.size(), 0, diff --git a/paddle/fluid/inference/tensorrt/convert/yolo_box_head_op.cc b/paddle/fluid/inference/tensorrt/convert/yolo_box_head_op.cc index 2987c8048046fe..6b58ef17a6a5a0 100644 --- a/paddle/fluid/inference/tensorrt/convert/yolo_box_head_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/yolo_box_head_op.cc @@ -35,8 +35,8 @@ class YoloBoxHeadOpConverter : public OpConverter { framework::OpDesc op_desc(op, nullptr); auto* x_tensor = engine_->GetITensor(op_desc.Input("X").front()); std::vector anchors = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("anchors")); - int class_num = BOOST_GET_CONST(int, op_desc.GetAttr("class_num")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("anchors")); + int class_num = PADDLE_GET_CONST(int, op_desc.GetAttr("class_num")); auto* yolo_box_plugin = new plugin::YoloBoxHeadPlugin(anchors, class_num); std::vector yolo_box_inputs; diff --git a/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc b/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc index 4d690814bda591..8477eefbb3b4f6 100644 --- a/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/yolo_box_op.cc @@ -41,21 +41,21 @@ class YoloBoxOpConverter : public OpConverter { auto* X_tensor = engine_->GetITensor(X); auto* img_size_tensor = engine_->GetITensor(img_size); - int class_num = BOOST_GET_CONST(int, op_desc.GetAttr("class_num")); + int class_num = 
PADDLE_GET_CONST(int, op_desc.GetAttr("class_num")); std::vector anchors = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("anchors")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("anchors")); int downsample_ratio = - BOOST_GET_CONST(int, op_desc.GetAttr("downsample_ratio")); - float conf_thresh = BOOST_GET_CONST(float, op_desc.GetAttr("conf_thresh")); - bool clip_bbox = BOOST_GET_CONST(bool, op_desc.GetAttr("clip_bbox")); - float scale_x_y = BOOST_GET_CONST(float, op_desc.GetAttr("scale_x_y")); + PADDLE_GET_CONST(int, op_desc.GetAttr("downsample_ratio")); + float conf_thresh = PADDLE_GET_CONST(float, op_desc.GetAttr("conf_thresh")); + bool clip_bbox = PADDLE_GET_CONST(bool, op_desc.GetAttr("clip_bbox")); + float scale_x_y = PADDLE_GET_CONST(float, op_desc.GetAttr("scale_x_y")); bool iou_aware = op_desc.HasAttr("iou_aware") - ? BOOST_GET_CONST(bool, op_desc.GetAttr("iou_aware")) + ? PADDLE_GET_CONST(bool, op_desc.GetAttr("iou_aware")) : false; float iou_aware_factor = op_desc.HasAttr("iou_aware_factor") - ? BOOST_GET_CONST(float, op_desc.GetAttr("iou_aware_factor")) + ? PADDLE_GET_CONST(float, op_desc.GetAttr("iou_aware_factor")) : 0.5; int type_id = static_cast(engine_->WithFp16()); diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index eaef331356575f..c38d57cf26b2a8 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -286,7 +286,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, const framework::OpDesc desc = *node->Op(); // do not support the op which is labeled the `skip_quant` if ((desc.HasAttr("namescope") && - BOOST_GET_CONST(std::string, desc.GetAttr("op_namescope")) == + PADDLE_GET_CONST(std::string, desc.GetAttr("op_namescope")) == "/skip_quant_2/") || desc.HasAttr("skip_quant")) return false; @@ -350,7 +350,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (op_type == "pool2d") { std::vector paddings = - BOOST_GET_CONST(std::vector, desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("paddings")); if (paddings.size() > 2) { return false; } @@ -366,7 +366,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } if (desc.HasAttr("data_format")) { std::string data_format = - BOOST_GET_CONST(std::string, desc.GetAttr("data_format")); + PADDLE_GET_CONST(std::string, desc.GetAttr("data_format")); if (data_format == "NHWC" || data_format == "NDHWC") { return false; } @@ -375,7 +375,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } else { std::string pool_type = - BOOST_GET_CONST(std::string, desc.GetAttr("pooling_type")); + PADDLE_GET_CONST(std::string, desc.GetAttr("pooling_type")); if (pool_type != "max" && pool_type != "avg") { VLOG(3) << "Wrong pool op type, the trt do not support the " << pool_type << " pool type."; @@ -383,11 +383,11 @@ bool OpTeller::Tell(const framework::ir::Node* node, } if (pool_type == "avg") { if (desc.HasAttr("global_pooling")) { - if (!BOOST_GET_CONST(bool, desc.GetAttr("global_pooling"))) { + if (!PADDLE_GET_CONST(bool, desc.GetAttr("global_pooling"))) { if (desc.HasAttr("exclusive")) { - if (BOOST_GET_CONST(bool, desc.GetAttr("exclusive"))) { + if (PADDLE_GET_CONST(bool, desc.GetAttr("exclusive"))) { std::vector ksize = - BOOST_GET_CONST(std::vector, desc.GetAttr("ksize")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("ksize")); for (size_t i = 0; i < ksize.size(); i++) { if (ksize[i] <= paddings[i]) { VLOG(3) << "the padding size should be less than the " @@ -436,7 +436,7 
@@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } else { const std::vector dilations = - BOOST_GET_CONST(std::vector, desc.GetAttr("dilations")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("dilations")); if (dilations[0] != 1 || dilations[1] != 1) { VLOG(3) << "In conv2d_transpose, Dilations must be (1, 1) for " "tensorRT, but given (" @@ -458,10 +458,10 @@ bool OpTeller::Tell(const framework::ir::Node* node, op_type == "depthwise_conv2d") { if (desc.HasAttr("padding_algorithm") && with_dynamic_shape) { auto padding_algorithm = - BOOST_GET_CONST(std::string, desc.GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, desc.GetAttr("padding_algorithm")); if (padding_algorithm == "SAME" && desc.HasAttr("strides")) { const std::vector strides = - BOOST_GET_CONST(std::vector, desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("strides")); // there is no issue if strides.size() less than 2 if (strides.size() > 1) { for (size_t i = 0; i < strides.size(); i++) { @@ -494,7 +494,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, auto* filter_desc = block->FindVar(filter_name); const auto filter_shape = filter_desc->GetShape(); - int groups = BOOST_GET_CONST(int, desc.GetAttr("groups")); + int groups = PADDLE_GET_CONST(int, desc.GetAttr("groups")); if (input_shape[1] != filter_shape[1] * groups) { VLOG(3) << "The number of input channels should be equal to filter " << "channels * groups. But got input channels " @@ -503,7 +503,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } const std::vector strides = - BOOST_GET_CONST(std::vector, desc.GetAttr("strides")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("strides")); if (strides.size() != 2) { VLOG(3) << "The size of strides should be 2, but got " << strides.size(); @@ -511,7 +511,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } const std::vector paddings = - BOOST_GET_CONST(std::vector, desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("paddings")); if (paddings.size() != 2) { VLOG(3) << "The size of paddings shoule be 2, but got " << paddings.size(); @@ -584,7 +584,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (!desc.HasAttr("axis")) { return false; } - int axis = BOOST_GET_CONST(int, desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, desc.GetAttr("axis")); if (!with_dynamic_shape) { if (axis == 0) return false; } @@ -600,7 +600,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } std::vector axis = - BOOST_GET_CONST(std::vector, desc.GetAttr("axis")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("axis")); if (!with_dynamic_shape && axis[0] != 0) return false; if (axis.size() >= nvinfer1::Dims::MAX_DIMS) return false; @@ -646,14 +646,14 @@ bool OpTeller::Tell(const framework::ir::Node* node, #else if (with_dynamic_shape) return false; #endif - int axis = BOOST_GET_CONST(int, desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, desc.GetAttr("axis")); if (axis != 1) return false; } } if (op_type == "flatten_contiguous_range") { if (!with_dynamic_shape) { - int start_axis = BOOST_GET_CONST(int, desc.GetAttr("start_axis")); - int stop_axis = BOOST_GET_CONST(int, desc.GetAttr("stop_axis")); + int start_axis = PADDLE_GET_CONST(int, desc.GetAttr("start_axis")); + int stop_axis = PADDLE_GET_CONST(int, desc.GetAttr("stop_axis")); auto x_var_name = desc.Input("X")[0]; auto* block = desc.Block(); if (block == nullptr) { @@ -768,17 +768,17 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (op_type == 
"arg_max") { int axis = desc.HasAttr("axis") - ? BOOST_GET_CONST(int64_t, desc.GetAttr("axis")) + ? PADDLE_GET_CONST(int64_t, desc.GetAttr("axis")) : -1; - bool flatten = BOOST_GET_CONST(bool, desc.GetAttr("flatten")); - int dtype = BOOST_GET_CONST(int, desc.GetAttr("dtype")); + bool flatten = PADDLE_GET_CONST(bool, desc.GetAttr("flatten")); + int dtype = PADDLE_GET_CONST(int, desc.GetAttr("dtype")); if (axis == 0 || flatten || dtype != 2) return false; } if (op_type == "affine_channel") { if (!desc.HasAttr("data_layout")) return false; auto data_layout = framework::StringToDataLayout( - BOOST_GET_CONST(std::string, desc.GetAttr("data_layout"))); + PADDLE_GET_CONST(std::string, desc.GetAttr("data_layout"))); if (data_layout != framework::DataLayout::kNCHW) return false; auto* block = desc.Block(); @@ -833,13 +833,13 @@ bool OpTeller::Tell(const framework::ir::Node* node, // TODO(wangxinxin08): tricky solution because the outputs of batchedNMS // plugin are not constient with those of multiclass_nms3 if (desc.HasAttr("nms_eta") == false) return false; - auto nms_eta = BOOST_GET_CONST(float, desc.GetAttr("nms_eta")); + auto nms_eta = PADDLE_GET_CONST(float, desc.GetAttr("nms_eta")); if (nms_eta <= 1.0) return false; - auto nms_top_k = BOOST_GET_CONST(int, desc.GetAttr("nms_top_k")); + auto nms_top_k = PADDLE_GET_CONST(int, desc.GetAttr("nms_top_k")); if (nms_top_k < 0) return false; - auto keep_top_k = BOOST_GET_CONST(int, desc.GetAttr("keep_top_k")); + auto keep_top_k = PADDLE_GET_CONST(int, desc.GetAttr("keep_top_k")); if (keep_top_k < 0) return false; auto registry = GetPluginRegistry(); @@ -854,18 +854,19 @@ bool OpTeller::Tell(const framework::ir::Node* node, } if (desc.HasAttr("data_layout")) { auto data_layout = framework::StringToDataLayout( - BOOST_GET_CONST(std::string, desc.GetAttr("data_layout"))); + PADDLE_GET_CONST(std::string, desc.GetAttr("data_layout"))); if (data_layout != framework::DataLayout::kNCHW && data_layout != framework::DataLayout::kNHWC) return false; } auto interp_method = - BOOST_GET_CONST(std::string, desc.GetAttr("interp_method")); + PADDLE_GET_CONST(std::string, desc.GetAttr("interp_method")); if (interp_method != "nearest") return false; - auto scale = BOOST_GET_CONST(float, desc.GetAttr("scale")); - auto out_h = BOOST_GET_CONST(int, desc.GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, desc.GetAttr("out_w")); - auto align_corners = BOOST_GET_CONST(bool, desc.GetAttr("align_corners")); + auto scale = PADDLE_GET_CONST(float, desc.GetAttr("scale")); + auto out_h = PADDLE_GET_CONST(int, desc.GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, desc.GetAttr("out_w")); + auto align_corners = + PADDLE_GET_CONST(bool, desc.GetAttr("align_corners")); if (!(scale > 0.f && (out_h <= 0 && out_w <= 0))) { if (out_h <= 0) { VLOG(3) << "out_h must be greater than 0 if scale is not set."; @@ -898,16 +899,16 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (!desc.HasAttr(attr)) return false; } auto data_layout = framework::StringToDataLayout( - BOOST_GET_CONST(std::string, desc.GetAttr("data_layout"))); + PADDLE_GET_CONST(std::string, desc.GetAttr("data_layout"))); if (data_layout != framework::DataLayout::kNCHW && data_layout != framework::DataLayout::kNHWC) return false; auto interp_method = - BOOST_GET_CONST(std::string, desc.GetAttr("interp_method")); + PADDLE_GET_CONST(std::string, desc.GetAttr("interp_method")); if (interp_method != "nearest") return false; - auto scale = BOOST_GET_CONST(std::vector, desc.GetAttr("scale")); - auto out_h = 
BOOST_GET_CONST(int, desc.GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, desc.GetAttr("out_w")); + auto scale = PADDLE_GET_CONST(std::vector, desc.GetAttr("scale")); + auto out_h = PADDLE_GET_CONST(int, desc.GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, desc.GetAttr("out_w")); if (!(out_h > 0 && out_w > 0)) { if (scale.size() < 2) return false; if (scale[0] <= 0.f || scale[1] <= 0.f) { @@ -952,7 +953,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } auto data_layout = framework::StringToDataLayout( - BOOST_GET_CONST(std::string, desc.GetAttr("data_layout"))); + PADDLE_GET_CONST(std::string, desc.GetAttr("data_layout"))); if (data_layout != framework::DataLayout::kNCHW && data_layout != framework::DataLayout::kNHWC) { VLOG(3) << "The op_type " << op_type @@ -960,14 +961,15 @@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } auto interp_method = - BOOST_GET_CONST(std::string, desc.GetAttr("interp_method")); + PADDLE_GET_CONST(std::string, desc.GetAttr("interp_method")); if (interp_method != "bilinear") { VLOG(3) << "The interp_method of op_type " << op_type << " is not bilinear"; return false; } - auto align_corners = BOOST_GET_CONST(bool, desc.GetAttr("align_corners")); + auto align_corners = + PADDLE_GET_CONST(bool, desc.GetAttr("align_corners")); if (align_corners != false) { VLOG(3) << "The bilinear_interp_v2 only supports align_corners with false."; @@ -979,7 +981,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (has_scale_input_size && desc.Input("Scale").size() != 1) { const std::vector scale = - BOOST_GET_CONST(std::vector, desc.GetAttr("scale")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("scale")); if (scale.size() <= 1) { if (!desc.HasAttr("out_h") || !desc.HasAttr("out_w")) { VLOG(3) << "The op_type " << op_type @@ -987,8 +989,8 @@ bool OpTeller::Tell(const framework::ir::Node* node, "out_h / out_w, it will return false"; return false; } - auto out_h = BOOST_GET_CONST(int, desc.GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, desc.GetAttr("out_w")); + auto out_h = PADDLE_GET_CONST(int, desc.GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, desc.GetAttr("out_w")); if (!(out_h <= 0 && out_w <= 0)) { if (out_h <= 0) { VLOG(3) << "The op_type " << op_type @@ -1031,7 +1033,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (op_type == "squeeze2") { std::vector axes; if (desc.HasAttr("axes")) { - axes = BOOST_GET_CONST(std::vector, desc.GetAttr("axes")); + axes = PADDLE_GET_CONST(std::vector, desc.GetAttr("axes")); } if (axes.size() == 0) { VLOG(3) << "The necessary attributes of the squeeze2 operator axes is " @@ -1050,7 +1052,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (op_type == "unsqueeze2") { std::vector axes; if (desc.HasAttr("axes")) { - axes = BOOST_GET_CONST(std::vector, desc.GetAttr("axes")); + axes = PADDLE_GET_CONST(std::vector, desc.GetAttr("axes")); } if (axes.size() == 0) { VLOG(3) << "The necessary attributes of the squeeze2 operator axes is " @@ -1123,7 +1125,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (!desc.HasAttr("axis")) { return false; } - int axis = BOOST_GET_CONST(int, desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, desc.GetAttr("axis")); if (axis == 0) { VLOG(3) << "Invalid split axis. 
Split on batch is not supported in " @@ -1144,11 +1146,11 @@ bool OpTeller::Tell(const framework::ir::Node* node, std::vector output_lengths; int num = 0; if (desc.HasAttr("num")) { - num = BOOST_GET_CONST(int, desc.GetAttr("num")); + num = PADDLE_GET_CONST(int, desc.GetAttr("num")); } if (desc.HasAttr("sections")) { output_lengths = - BOOST_GET_CONST(std::vector, desc.GetAttr("sections")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("sections")); } if (output_lengths.size() == 0 && num == 0) { VLOG(3) << "sections and num cannot be equal to 0 at the same time"; @@ -1239,7 +1241,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (op_type == "slice") { if (desc.HasAttr("decrease_axis")) { std::vector decrease_axis = - BOOST_GET_CONST(std::vector, desc.GetAttr("decrease_axis")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("decrease_axis")); if (!with_dynamic_shape) { if (decrease_axis.end() != std::find(decrease_axis.begin(), decrease_axis.end(), 0)) { @@ -1255,11 +1257,11 @@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } else { std::vector axes = - BOOST_GET_CONST(std::vector, desc.GetAttr("axes")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("axes")); std::vector starts = - BOOST_GET_CONST(std::vector, desc.GetAttr("starts")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("starts")); std::vector ends = - BOOST_GET_CONST(std::vector, desc.GetAttr("ends")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("ends")); if (axes.size() != starts.size() || axes.size() != ends.size()) { VLOG(3) << "The shape of attributes of the slice operator axes " @@ -1404,7 +1406,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, #if IS_TRT_VERSION_LT(7000) if (desc.HasAttr("approximate")) { VLOG(3) << "approximate gelu op needs TensorRT 7.0 and after"; - if (BOOST_GET_CONST(bool, desc.GetAttr("approximate"))) return false; + if (PADDLE_GET_CONST(bool, desc.GetAttr("approximate"))) return false; } #endif @@ -1506,7 +1508,8 @@ bool OpTeller::Tell(const framework::ir::Node* node, } if (op_type == "pad") { - const float pad_value = BOOST_GET_CONST(float, desc.GetAttr("pad_value")); + const float pad_value = + PADDLE_GET_CONST(float, desc.GetAttr("pad_value")); if (pad_value != 0.0f) { VLOG(3) << "The pad layer of TRT only support zero."; return false; @@ -1527,7 +1530,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } int nbDims = shape.size(); std::vector paddings = - BOOST_GET_CONST(std::vector, desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("paddings")); int pad_size = paddings.size(); if (nbDims < 2) { return false; @@ -1652,15 +1655,15 @@ bool OpTeller::Tell(const framework::ir::Node* node, } const auto pooled_height = - BOOST_GET_CONST(int, desc.GetAttr("pooled_height")); + PADDLE_GET_CONST(int, desc.GetAttr("pooled_height")); if (pooled_height <= 0) return false; const auto pooled_width = - BOOST_GET_CONST(int, desc.GetAttr("pooled_width")); + PADDLE_GET_CONST(int, desc.GetAttr("pooled_width")); if (pooled_width <= 0) return false; const auto spatial_scale = - BOOST_GET_CONST(float, desc.GetAttr("spatial_scale")); + PADDLE_GET_CONST(float, desc.GetAttr("spatial_scale")); if (spatial_scale <= 0.f) return false; auto roi_align_inputs = desc.Inputs(); @@ -1721,7 +1724,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, auto* input_desc = block->FindVar(desc.Input("Input").front()); const auto input_shape = input_desc->GetShape(); const auto head_number = - BOOST_GET_CONST(int, desc.GetAttr("head_number")); + PADDLE_GET_CONST(int, 
desc.GetAttr("head_number")); auto* biasqk_desc = block->FindVar(desc.Input("BiasQK").front()); const auto biasqk_shape = biasqk_desc->GetShape(); @@ -1778,7 +1781,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, // y_num_col_dims ==1 if (desc.HasAttr("y_num_col_dims")) { int y_num_col_dims = - BOOST_GET_CONST(int, desc.GetAttr("y_num_col_dims")); + PADDLE_GET_CONST(int, desc.GetAttr("y_num_col_dims")); if (y_num_col_dims != 1) { VLOG(3) << " fc_op'y_num_col_dims must be 1, but y_num_col_dims = " << y_num_col_dims; @@ -1788,9 +1791,9 @@ bool OpTeller::Tell(const framework::ir::Node* node, */ int x_num_col_dims = desc.HasAttr("x_num_col_dims") - ? BOOST_GET_CONST(int, desc.GetAttr("x_num_col_dims")) + ? PADDLE_GET_CONST(int, desc.GetAttr("x_num_col_dims")) : (desc.HasAttr("in_num_col_dims") - ? BOOST_GET_CONST(int, desc.GetAttr("in_num_col_dims")) + ? PADDLE_GET_CONST(int, desc.GetAttr("in_num_col_dims")) : 1); if (x_num_col_dims < 1) { VLOG(3) << "fc_op expects x_num_col_dims >= 1, " @@ -1817,7 +1820,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } } std::vector shape = - BOOST_GET_CONST(std::vector, desc.GetAttr("shape")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("shape")); if (shape.size() >= nvinfer1::Dims::MAX_DIMS) return false; if (!with_dynamic_shape) { if (shape.size() == 1) { @@ -1892,17 +1895,17 @@ bool OpTeller::Tell(const framework::ir::Node* node, // The batch size dimension cannot be reduced if it's not dynamic shape. auto* x_var_desc = block->FindVar(desc.Input("X")[0]); if (!with_dynamic_shape) { - if (BOOST_GET_CONST(bool, desc.GetAttr("reduce_all"))) return false; + if (PADDLE_GET_CONST(bool, desc.GetAttr("reduce_all"))) return false; std::vector dim = - BOOST_GET_CONST(std::vector, desc.GetAttr("dim")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("dim")); const auto input_shape = x_var_desc->GetShape(); for (auto x : dim) { if (x == 0 || (x + input_shape.size() == 0)) return false; } } else { - if (BOOST_GET_CONST(bool, desc.GetAttr("reduce_all")) && - !BOOST_GET_CONST(bool, desc.GetAttr("keep_dim"))) + if (PADDLE_GET_CONST(bool, desc.GetAttr("reduce_all")) && + !PADDLE_GET_CONST(bool, desc.GetAttr("keep_dim"))) return false; } @@ -1946,7 +1949,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, // output_padding is set when stride > 1 if (desc.HasAttr("output_padding")) { const std::vector output_padding = - BOOST_GET_CONST(std::vector, desc.GetAttr("output_padding")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("output_padding")); if (output_padding.size() > 0) { int max_padding = *std::max_element(output_padding.begin(), output_padding.end()); @@ -1958,7 +1961,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, if (op_type == "conv3d" || op_type == "conv3d_transpose") { if (desc.HasAttr("padding_algorithm")) { std::string padding_algorithm = - BOOST_GET_CONST(std::string, desc.GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, desc.GetAttr("padding_algorithm")); // trt error is arised if conv3d_transpose and SAME if (op_type == "conv3d_transpose" && padding_algorithm == "SAME" && @@ -1974,7 +1977,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } #endif std::vector paddings = - BOOST_GET_CONST(std::vector, desc.GetAttr("paddings")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("paddings")); // conv3d and conv3d_transpose need padding check if (paddings.size() > 3) return false; @@ -1996,7 +1999,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } else { const std::vector dilations = - 
BOOST_GET_CONST(std::vector, desc.GetAttr("dilations")); + PADDLE_GET_CONST(std::vector, desc.GetAttr("dilations")); if (dilations[0] != 1 || dilations[1] != 1 || dilations[2] != 1) { VLOG(3) << "In conv3d_transpose, Dilations must be (1, 1, 1) for " "tensorRT, but given (" @@ -2035,8 +2038,8 @@ bool OpTeller::Tell(const framework::ir::Node* node, } if (op_type == "cast") { - int in_dtype = BOOST_GET_CONST(int, desc.GetAttr("in_dtype")); - int out_dtype = BOOST_GET_CONST(int, desc.GetAttr("out_dtype")); + int in_dtype = PADDLE_GET_CONST(int, desc.GetAttr("in_dtype")); + int out_dtype = PADDLE_GET_CONST(int, desc.GetAttr("out_dtype")); if ((in_dtype == 4 || in_dtype == 5) && out_dtype == 4) { VLOG(3) << "unsupport data type conversion"; return false; @@ -2062,7 +2065,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, return false; } if (desc.HasAttr("axis")) { - int axis = BOOST_GET_CONST(int, desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, desc.GetAttr("axis")); if (axis == 0) { VLOG(3) << "top_k_v2 does not support axis == 0 in " "tensorrt"; @@ -2070,7 +2073,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, } } if (desc.HasAttr("sorted")) { - bool sorted = BOOST_GET_CONST(bool, desc.GetAttr("sorted")); + bool sorted = PADDLE_GET_CONST(bool, desc.GetAttr("sorted")); if (!sorted) { VLOG(3) << "top_k_v2 does not support results not sorted in " "tensorrt"; @@ -2094,7 +2097,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, VLOG(3) << "compare is not supported when TensorRT < 8.0"; return false; #else - int axis = BOOST_GET_CONST(int, desc.GetAttr("axis")); + int axis = PADDLE_GET_CONST(int, desc.GetAttr("axis")); if (axis == 0) { return false; } diff --git a/paddle/fluid/operators/activation_op.cc b/paddle/fluid/operators/activation_op.cc index 4a7f6cfbf0b315..72f57b661e911a 100644 --- a/paddle/fluid/operators/activation_op.cc +++ b/paddle/fluid/operators/activation_op.cc @@ -79,7 +79,7 @@ class ActivationGradOpMaker : public framework::SingleGradOpMaker { static_cast(ActBwdOpFwdDeps::kDepX)) || FLAGS_use_mkldnn || (op->HasAttr("use_mkldnn") && - BOOST_GET_CONST(bool, op->GetAttr("use_mkldnn")))) { + PADDLE_GET_CONST(bool, op->GetAttr("use_mkldnn")))) { op->SetInput("X", this->Input("X")); // x } diff --git a/paddle/fluid/operators/batch_norm_op.cc b/paddle/fluid/operators/batch_norm_op.cc index 90c6abd3a6aa5d..9ad55c21f10254 100644 --- a/paddle/fluid/operators/batch_norm_op.cc +++ b/paddle/fluid/operators/batch_norm_op.cc @@ -460,8 +460,8 @@ void BatchNormGradMaker::Apply(GradOpPtr op) const { } // used when setting use_global_stats True during training - if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats")) || - BOOST_GET_CONST(bool, this->GetAttr("is_test"))) { + if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats")) || + PADDLE_GET_CONST(bool, this->GetAttr("is_test"))) { op->SetInput("Mean", this->Output("MeanOut")); op->SetInput("Variance", this->Output("VarianceOut")); } @@ -480,7 +480,7 @@ void BatchNormDoubleGradMaker::Apply(GradOpPtr op) const { op->SetInput("Scale", this->Input("Scale")); op->SetInput("SavedMean", this->Input("SavedMean")); op->SetInput("SavedVariance", this->Input("SavedVariance")); - if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats"))) { + if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats"))) { op->SetInput("Mean", this->Input("Mean")); op->SetInput("Variance", this->Input("Variance")); } diff --git a/paddle/fluid/operators/benchmark/op_tester.cc b/paddle/fluid/operators/benchmark/op_tester.cc index 
18e42561e2f9c4..0ff6b6dca9757e 100644 --- a/paddle/fluid/operators/benchmark/op_tester.cc +++ b/paddle/fluid/operators/benchmark/op_tester.cc @@ -449,19 +449,20 @@ std::string OpTester::DebugString() { switch (attr_type) { case framework::proto::AttrType::BOOLEAN: { ss << GenSpaces(count) << "type: BOOLEAN\n"; - ss << GenSpaces(count) << "b: " << BOOST_GET_CONST(bool, attr) << "\n"; + ss << GenSpaces(count) << "b: " << PADDLE_GET_CONST(bool, attr) << "\n"; } break; case framework::proto::AttrType::INT: { ss << GenSpaces(count) << "type: INT\n"; - ss << GenSpaces(count) << "i: " << BOOST_GET_CONST(int, attr) << "\n"; + ss << GenSpaces(count) << "i: " << PADDLE_GET_CONST(int, attr) << "\n"; } break; case framework::proto::AttrType::FLOAT: { ss << GenSpaces(count) << "type: FLOAT\n"; - ss << GenSpaces(count) << "f: " << BOOST_GET_CONST(float, attr) << "\n"; + ss << GenSpaces(count) << "f: " << PADDLE_GET_CONST(float, attr) + << "\n"; } break; case framework::proto::AttrType::STRING: { ss << GenSpaces(count) << "type: STRING\n"; - ss << GenSpaces(count) << "s: \"" << BOOST_GET_CONST(std::string, attr) + ss << GenSpaces(count) << "s: \"" << PADDLE_GET_CONST(std::string, attr) << "\"\n"; } break; case framework::proto::AttrType::BOOLEANS: { @@ -486,7 +487,7 @@ std::string OpTester::DebugString() { } break; case framework::proto::AttrType::LONG: { ss << GenSpaces(count) << "type: LONG\n"; - ss << GenSpaces(count) << "l: " << BOOST_GET_CONST(int64_t, attr) + ss << GenSpaces(count) << "l: " << PADDLE_GET_CONST(int64_t, attr) << "\n"; } break; case framework::proto::AttrType::LONGS: { diff --git a/paddle/fluid/operators/collective/c_allreduce_sum_op.cc b/paddle/fluid/operators/collective/c_allreduce_sum_op.cc index ba539a4f695435..5aac406bd6e26c 100644 --- a/paddle/fluid/operators/collective/c_allreduce_sum_op.cc +++ b/paddle/fluid/operators/collective/c_allreduce_sum_op.cc @@ -33,7 +33,7 @@ class CAllReduceSumOpGradMaker : public framework::SingleGradOpMaker { protected: void Apply(GradOpPtr retv) const override { - bool use_mp = BOOST_GET_CONST(bool, this->GetAttr("use_model_parallel")); + bool use_mp = PADDLE_GET_CONST(bool, this->GetAttr("use_model_parallel")); if (use_mp) { retv->SetType("c_identity"); } else { diff --git a/paddle/fluid/operators/controlflow/fetch_op.cc b/paddle/fluid/operators/controlflow/fetch_op.cc index 64e3d61237b32d..c1ed46867f1aca 100644 --- a/paddle/fluid/operators/controlflow/fetch_op.cc +++ b/paddle/fluid/operators/controlflow/fetch_op.cc @@ -117,18 +117,18 @@ class FetchOp : public framework::OperatorBase { if (fetch_var->IsType()) { auto &src_item = fetch_var->Get(); - auto *dst_item = &(BOOST_GET(framework::LoDTensor, fetch_list->at(col))); + auto *dst_item = &(PADDLE_GET(framework::LoDTensor, fetch_list->at(col))); DataCopy(src_item, fetch_var_name, dst_item); } else if (fetch_var->IsType()) { auto &src_item = fetch_var->Get(); - auto *dst_item = &(BOOST_GET(framework::Vocab, fetch_list->at(col))); + auto *dst_item = &(PADDLE_GET(framework::Vocab, fetch_list->at(col))); *dst_item = src_item; } else { auto &src_item = fetch_var->Get(); framework::LoDTensorArray tmp(src_item.size()); fetch_list->at(col) = tmp; auto &dst_item = - BOOST_GET(framework::LoDTensorArray, fetch_list->at(col)); + PADDLE_GET(framework::LoDTensorArray, fetch_list->at(col)); for (size_t i = 0; i < src_item.size(); ++i) { DataCopy(src_item[i], fetch_var_name, &dst_item[i]); } diff --git a/paddle/fluid/operators/controlflow/fetch_v2_op.cc b/paddle/fluid/operators/controlflow/fetch_v2_op.cc index 
c23b694f83a200..64489c294d1233 100644 --- a/paddle/fluid/operators/controlflow/fetch_v2_op.cc +++ b/paddle/fluid/operators/controlflow/fetch_v2_op.cc @@ -149,7 +149,7 @@ class FetchV2Kernel { if (!src_item.IsInitialized()) { return; } - auto *dst_item = &(BOOST_GET(framework::LoDTensor, fetch_list->at(col))); + auto *dst_item = &(PADDLE_GET(framework::LoDTensor, fetch_list->at(col))); bool check_place = platform::is_cpu_place(src_item.place()) || platform::is_cuda_pinned_place(src_item.place()); PADDLE_ENFORCE_EQ( @@ -168,7 +168,7 @@ class FetchV2Kernel { framework::LoDTensorArray tmp(src_item.size()); fetch_list->at(col) = tmp; auto &dst_item = - BOOST_GET(framework::LoDTensorArray, fetch_list->at(col)); + PADDLE_GET(framework::LoDTensorArray, fetch_list->at(col)); for (size_t i = 0; i < src_item.size(); ++i) { PADDLE_ENFORCE_EQ(platform::is_cpu_place(src_item[i].place()), true, diff --git a/paddle/fluid/operators/controlflow/op_variant.h b/paddle/fluid/operators/controlflow/op_variant.h index 04afe548e92e3e..738e7a4acc7eb0 100644 --- a/paddle/fluid/operators/controlflow/op_variant.h +++ b/paddle/fluid/operators/controlflow/op_variant.h @@ -53,7 +53,7 @@ class OpVariant { it, attrs.end(), platform::errors::NotFound("Cannot find attribute %s.", name)); - return BOOST_GET_CONST(AttrType, it->second); + return PADDLE_GET_CONST(AttrType, it->second); } bool operator==(const OpVariant &other) const { diff --git a/paddle/fluid/operators/controlflow/recurrent_op_helper.cc b/paddle/fluid/operators/controlflow/recurrent_op_helper.cc index 7b4af7eb9dfff6..56ac412b896c52 100644 --- a/paddle/fluid/operators/controlflow/recurrent_op_helper.cc +++ b/paddle/fluid/operators/controlflow/recurrent_op_helper.cc @@ -44,7 +44,7 @@ static bool IsSkippableVar(const std::string &name, static void ClearSkipVars(const OpVariant &op) { auto &attrs = const_cast(op.Attrs()); - std::vector &attr_skip_vars = BOOST_GET( + std::vector &attr_skip_vars = PADDLE_GET( std::vector, attrs[RecurrentBase::kSkipEagerDeletionVars]); attr_skip_vars.clear(); } @@ -55,7 +55,7 @@ static void AddSkipVars(const OpVariant &op, const Container &skip_vars) { auto &attrs = const_cast(op.Attrs()); VLOG(2) << "Prepare to add " << skip_vars.size() << " skip var(s): " << paddle::string::join_strings(skip_vars, ' '); - std::vector &attr_skip_vars = BOOST_GET( + std::vector &attr_skip_vars = PADDLE_GET( std::vector, attrs[RecurrentBase::kSkipEagerDeletionVars]); attr_skip_vars.insert( attr_skip_vars.end(), skip_vars.cbegin(), skip_vars.cend()); diff --git a/paddle/fluid/operators/controlflow/while_op.cc b/paddle/fluid/operators/controlflow/while_op.cc index fcd60e301024d7..6ccacddf070ed0 100644 --- a/paddle/fluid/operators/controlflow/while_op.cc +++ b/paddle/fluid/operators/controlflow/while_op.cc @@ -560,8 +560,8 @@ class WhileGradOpShapeInference : public framework::InferShapeBase { continue; } framework::VarDesc *in_var = - BOOST_GET(framework::VarDesc *, in_var_ptrs[i]); - BOOST_GET(framework::VarDesc *, out_var_ptrs[i]) + PADDLE_GET(framework::VarDesc *, in_var_ptrs[i]); + PADDLE_GET(framework::VarDesc *, out_var_ptrs[i]) ->SetShape(in_var->GetShape()); } } diff --git a/paddle/fluid/operators/cum_op.cc b/paddle/fluid/operators/cum_op.cc index b42f26342ab979..54e7a374338c25 100644 --- a/paddle/fluid/operators/cum_op.cc +++ b/paddle/fluid/operators/cum_op.cc @@ -64,13 +64,13 @@ class CumsumGradMaker : public framework::SingleGradOpMaker { grad_op->SetType("cumsum"); grad_op->SetInput("X", this->OutputGrad("Out")); grad_op->SetOutput("Out", 
this->InputGrad("X")); - grad_op->SetAttr("axis", BOOST_GET_CONST(int, this->GetAttr("axis"))); + grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis"))); grad_op->SetAttr("flatten", - BOOST_GET_CONST(bool, this->GetAttr("flatten"))); + PADDLE_GET_CONST(bool, this->GetAttr("flatten"))); grad_op->SetAttr("reverse", - !BOOST_GET_CONST(bool, this->GetAttr("reverse"))); + !PADDLE_GET_CONST(bool, this->GetAttr("reverse"))); grad_op->SetAttr("exclusive", - BOOST_GET_CONST(bool, this->GetAttr("exclusive"))); + PADDLE_GET_CONST(bool, this->GetAttr("exclusive"))); } }; @@ -131,13 +131,13 @@ class LogcumsumexpGradMaker : public framework::SingleGradOpMaker { grad_op->SetInput("Out", this->Output("Out")); grad_op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out")); grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); - grad_op->SetAttr("axis", BOOST_GET_CONST(int, this->GetAttr("axis"))); + grad_op->SetAttr("axis", PADDLE_GET_CONST(int, this->GetAttr("axis"))); grad_op->SetAttr("flatten", - BOOST_GET_CONST(bool, this->GetAttr("flatten"))); + PADDLE_GET_CONST(bool, this->GetAttr("flatten"))); grad_op->SetAttr("exclusive", - BOOST_GET_CONST(bool, this->GetAttr("exclusive"))); + PADDLE_GET_CONST(bool, this->GetAttr("exclusive"))); grad_op->SetAttr("reverse", - BOOST_GET_CONST(bool, this->GetAttr("reverse"))); + PADDLE_GET_CONST(bool, this->GetAttr("reverse"))); } }; diff --git a/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc b/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc index 2bae91638c5f38..ddb8685ee3ab88 100644 --- a/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc +++ b/paddle/fluid/operators/detection/collect_fpn_proposals_op.cc @@ -73,9 +73,9 @@ class CollectFpnProposalsOp : public framework::OperatorWithKernel { auto score_inputs = context->GetInputVarPtrs("MultiLevelScores"); for (size_t i = 0; i < roi_inputs.size(); ++i) { framework::Variable *roi_var = - BOOST_GET(framework::Variable *, roi_inputs[i]); + PADDLE_GET(framework::Variable *, roi_inputs[i]); framework::Variable *score_var = - BOOST_GET(framework::Variable *, score_inputs[i]); + PADDLE_GET(framework::Variable *, score_inputs[i]); auto &roi_lod = roi_var->Get().lod(); auto &score_lod = score_var->Get().lod(); PADDLE_ENFORCE_EQ( diff --git a/paddle/fluid/operators/empty_op.cc b/paddle/fluid/operators/empty_op.cc index e89241cb29178b..aed1ca284a1af3 100644 --- a/paddle/fluid/operators/empty_op.cc +++ b/paddle/fluid/operators/empty_op.cc @@ -77,7 +77,7 @@ class EmptyOpVarTypeInference : public framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext* context) const override { auto data_type = static_cast( - BOOST_GET_CONST(int, context->GetAttr("dtype"))); + PADDLE_GET_CONST(int, context->GetAttr("dtype"))); context->SetOutputDataType("Out", data_type); } }; diff --git a/paddle/fluid/operators/eye_op.cc b/paddle/fluid/operators/eye_op.cc index 7aef74aca30c5f..5ff3641e757ce5 100644 --- a/paddle/fluid/operators/eye_op.cc +++ b/paddle/fluid/operators/eye_op.cc @@ -37,7 +37,7 @@ class EyeOpVarTypeInference : public framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext* ctx) const override { auto data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); ctx->SetOutputDataType("Out", data_type); } }; diff --git a/paddle/fluid/operators/fill_any_like_op.cc b/paddle/fluid/operators/fill_any_like_op.cc index 
03f973e8136884..528ea076a322be 100644 --- a/paddle/fluid/operators/fill_any_like_op.cc +++ b/paddle/fluid/operators/fill_any_like_op.cc @@ -75,7 +75,7 @@ class FillAnyLikeVarTypeInference : public framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext *ctx) const override { auto var_data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); if (var_data_type < 0) { ctx->SetOutputDataType("Out", ctx->GetInputDataType("X")); } else { diff --git a/paddle/fluid/operators/fill_constant_op.cc b/paddle/fluid/operators/fill_constant_op.cc index dce044441a8a78..68ea043d680cb6 100644 --- a/paddle/fluid/operators/fill_constant_op.cc +++ b/paddle/fluid/operators/fill_constant_op.cc @@ -123,7 +123,7 @@ class FillConstantOpVarTypeInference : public framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext *ctx) const override { auto data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); ctx->SetOutputDataType("Out", data_type); } }; diff --git a/paddle/fluid/operators/fill_op.cc b/paddle/fluid/operators/fill_op.cc index 5d7e6f7a7e3772..8fe7b417e662e6 100644 --- a/paddle/fluid/operators/fill_op.cc +++ b/paddle/fluid/operators/fill_op.cc @@ -63,7 +63,7 @@ class FillOpVarTypeInference : public framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext* ctx) const override { auto data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); ctx->SetOutputDataType("Out", data_type); } }; diff --git a/paddle/fluid/operators/fused/fused_attention_op.cc b/paddle/fluid/operators/fused/fused_attention_op.cc index f093722c226585..30badd3125588d 100644 --- a/paddle/fluid/operators/fused/fused_attention_op.cc +++ b/paddle/fluid/operators/fused/fused_attention_op.cc @@ -612,7 +612,7 @@ class FusedAttentionGradOpMaker : public framework::SingleGradOpMaker { op->SetAttrMap(this->Attrs()); bool is_pre_layer_norm = - BOOST_GET_CONST(bool, op->GetAttr("pre_layer_norm")); + PADDLE_GET_CONST(bool, op->GetAttr("pre_layer_norm")); if (is_pre_layer_norm) { if (this->HasInput("LnScale")) { op->SetInput("LnScale", this->Input("LnScale")); diff --git a/paddle/fluid/operators/fused/fused_elemwise_activation_op.cc b/paddle/fluid/operators/fused/fused_elemwise_activation_op.cc index 31bb78922a5a56..85609076804809 100644 --- a/paddle/fluid/operators/fused/fused_elemwise_activation_op.cc +++ b/paddle/fluid/operators/fused/fused_elemwise_activation_op.cc @@ -283,14 +283,14 @@ class FusedElemwiseActivationGradMaker grad_op->SetAttrMap(this->Attrs()); - std::vector functor_names = BOOST_GET_CONST( + std::vector functor_names = PADDLE_GET_CONST( std::vector, grad_op->GetAttr("functor_list")); functor_names[0] += "_grad"; functor_names[1] += "_grad"; grad_op->SetAttr("functor_list", functor_names); - if (BOOST_GET_CONST(bool, grad_op->GetAttr("save_intermediate_out"))) { + if (PADDLE_GET_CONST(bool, grad_op->GetAttr("save_intermediate_out"))) { // PADDLE_ENFORCE_NE(Output("IntermediateOut").size(), 0); grad_op->SetInput("IntermediateOut", this->Output("IntermediateOut")); grad_op->SetOutput(framework::GradVarName("IntermediateOut"), diff --git a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc index 8439d5c7160bb1..d05fbeac00ad80 100644 --- 
a/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc +++ b/paddle/fluid/operators/fused/fused_embedding_seq_pool_op.cc @@ -58,7 +58,7 @@ class FusedEmbeddingSeqPoolOp : public framework::OperatorWithKernel { int64_t last_dim = FusedEmbeddingSeqPoolLastDim(table_dims, ids_dims); // in compile time, the lod level of ids must be 1 framework::VarDesc* ids_desc = - BOOST_GET(framework::VarDesc*, ctx->GetInputVarPtrs("Ids")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetInputVarPtrs("Ids")[0]); PADDLE_ENFORCE_EQ(ids_desc->GetLoDLevel(), 1, platform::errors::InvalidArgument( @@ -154,7 +154,7 @@ class FusedEmbeddingSeqPoolOpGradVarTypeInference void operator()(framework::InferVarTypeContext* ctx) const override { auto out_var_name = framework::GradVarName("W"); auto attr = ctx->GetAttr("is_sparse"); - bool is_sparse = BOOST_GET(bool, attr); + bool is_sparse = PADDLE_GET(bool, attr); if (is_sparse) { VLOG(3) << "fused_embedding_seq_pool_grad op " << framework::GradVarName("W") << " is set to SelectedRows"; diff --git a/paddle/fluid/operators/fused/fused_feedforward_op.cc b/paddle/fluid/operators/fused/fused_feedforward_op.cc index 1b61cd245c08ca..9b8b256a9ee54f 100644 --- a/paddle/fluid/operators/fused/fused_feedforward_op.cc +++ b/paddle/fluid/operators/fused/fused_feedforward_op.cc @@ -371,7 +371,7 @@ class FusedFeedForwardOpGradMaker : public framework::SingleGradOpMaker { op->SetInput("Dropout2Out", this->Output("Dropout2Out")); op->SetAttrMap(this->Attrs()); - bool pre_layer_norm = BOOST_GET_CONST(bool, op->GetAttr("pre_layer_norm")); + bool pre_layer_norm = PADDLE_GET_CONST(bool, op->GetAttr("pre_layer_norm")); op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); if (pre_layer_norm) { diff --git a/paddle/fluid/operators/fused/fused_gate_attention_op.cc b/paddle/fluid/operators/fused/fused_gate_attention_op.cc index 6b8cbbc9673856..2e6f991e41fa1f 100644 --- a/paddle/fluid/operators/fused/fused_gate_attention_op.cc +++ b/paddle/fluid/operators/fused/fused_gate_attention_op.cc @@ -276,7 +276,7 @@ class FusedGateAttentionGradOpMaker : public framework::SingleGradOpMaker { op->SetOutput(framework::GradVarName("Query"), this->InputGrad("Query")); op->SetAttrMap(this->Attrs()); - bool merge_qkv = BOOST_GET_CONST(bool, op->GetAttr("merge_qkv")); + bool merge_qkv = PADDLE_GET_CONST(bool, op->GetAttr("merge_qkv")); if (merge_qkv) { op->SetInput("QKVWeight", this->Input("QKVWeight")); op->SetOutput(framework::GradVarName("QKVWeight"), @@ -307,7 +307,7 @@ class FusedGateAttentionGradOpMaker : public framework::SingleGradOpMaker { op->SetInput("SoftmaxOut", this->Output("SoftmaxOut")); - bool has_gating = BOOST_GET_CONST(bool, op->GetAttr("has_gating")); + bool has_gating = PADDLE_GET_CONST(bool, op->GetAttr("has_gating")); if (has_gating) { op->SetInput("GateWeight", this->Input("GateWeight")); op->SetOutput(framework::GradVarName("GateWeight"), diff --git a/paddle/fluid/operators/graph_send_recv_op.cc b/paddle/fluid/operators/graph_send_recv_op.cc index 8a704078e9bb58..d9c0ec51714644 100644 --- a/paddle/fluid/operators/graph_send_recv_op.cc +++ b/paddle/fluid/operators/graph_send_recv_op.cc @@ -101,12 +101,12 @@ class GraphSendRecvGradOpMaker : public framework::SingleGradOpMaker { op->SetInput("Dst_index", this->Input("Dst_index")); op->SetInput("X", this->Input("X")); - if (BOOST_GET_CONST(std::string, this->GetAttr("pool_type")) == "MEAN") { + if (PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MEAN") { op->SetInput("Dst_count", this->Output("Dst_count")); } - if 
(BOOST_GET_CONST(std::string, this->GetAttr("pool_type")) == "MIN" || - BOOST_GET_CONST(std::string, this->GetAttr("pool_type")) == "MAX") { + if (PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MIN" || + PADDLE_GET_CONST(std::string, this->GetAttr("pool_type")) == "MAX") { op->SetInput("Out", this->Output("Out")); } diff --git a/paddle/fluid/operators/hierarchical_sigmoid_op.cc b/paddle/fluid/operators/hierarchical_sigmoid_op.cc index 9b44c9affe01ad..338b8af5036731 100644 --- a/paddle/fluid/operators/hierarchical_sigmoid_op.cc +++ b/paddle/fluid/operators/hierarchical_sigmoid_op.cc @@ -233,7 +233,7 @@ class HierarchicalSigmoidGradOpGradVarTypeInference } auto attr = ctx->GetAttr("is_sparse"); - bool is_sparse = BOOST_GET(bool, attr); + bool is_sparse = PADDLE_GET(bool, attr); if (is_sparse) { VLOG(3) << "hierarchical_sigmoid_grad op " << framework::GradVarName("W") << " is set to SelectedRows"; diff --git a/paddle/fluid/operators/im2sequence_op.cc b/paddle/fluid/operators/im2sequence_op.cc index dce0ca7a646fd5..503b64c343118d 100644 --- a/paddle/fluid/operators/im2sequence_op.cc +++ b/paddle/fluid/operators/im2sequence_op.cc @@ -53,7 +53,7 @@ class Im2SequenceOp : public framework::OperatorWithKernel { if (!ctx->IsRuntime()) { // set lod level for compile-time framework::VarDesc* out_desc = - BOOST_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); out_desc->SetLoDLevel(1); } diff --git a/paddle/fluid/operators/increment_op.cc b/paddle/fluid/operators/increment_op.cc index 3ab6b9f9405ed6..d044ae056ee127 100644 --- a/paddle/fluid/operators/increment_op.cc +++ b/paddle/fluid/operators/increment_op.cc @@ -77,7 +77,7 @@ class IncrementGradOpMaker : public framework::SingleGradOpMaker { grad_op->SetType("increment"); grad_op->SetInput("X", this->Output("Out")); grad_op->SetOutput("Out", this->Input("X")); - grad_op->SetAttr("step", -BOOST_GET_CONST(float, this->GetAttr("step"))); + grad_op->SetAttr("step", -PADDLE_GET_CONST(float, this->GetAttr("step"))); } }; diff --git a/paddle/fluid/operators/inplace_abn_op.cc b/paddle/fluid/operators/inplace_abn_op.cc index c1880d2a1a1941..791fef1f7c59d2 100644 --- a/paddle/fluid/operators/inplace_abn_op.cc +++ b/paddle/fluid/operators/inplace_abn_op.cc @@ -204,7 +204,7 @@ class InplaceABNOpGradMaker : public framework::SingleGradOpMaker { } // used when setting use_global_stats True during training - if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats"))) { + if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats"))) { op->SetInput("Mean", this->Output("MeanOut")); op->SetInput("Variance", this->Output("VarianceOut")); } diff --git a/paddle/fluid/operators/lod_reset_op.cc b/paddle/fluid/operators/lod_reset_op.cc index f3507142126468..b0a6b073b4a027 100644 --- a/paddle/fluid/operators/lod_reset_op.cc +++ b/paddle/fluid/operators/lod_reset_op.cc @@ -85,7 +85,7 @@ class LoDResetOpVarTypeInference void operator()(framework::InferVarTypeContext *ctx) const override { auto x_var_name = Input(ctx, "X").front(); auto out_var_name = Output(ctx, "Out").front(); - bool append = BOOST_GET_CONST(bool, ctx->GetAttr("append")); + bool append = PADDLE_GET_CONST(bool, ctx->GetAttr("append")); if (ctx->HasInput("Y")) { auto y_var_name = Input(ctx, "Y").front(); auto y_lod_level = std::max(GetLoDLevel(ctx, y_var_name), 1); diff --git a/paddle/fluid/operators/lookup_table_op.cc b/paddle/fluid/operators/lookup_table_op.cc index 6282fdd543bbae..74b406aef648b6 100644 --- 
a/paddle/fluid/operators/lookup_table_op.cc +++ b/paddle/fluid/operators/lookup_table_op.cc @@ -203,7 +203,7 @@ class LookupTableOpGradVarTypeInference : public framework::VarTypeInference { void operator()(framework::InferVarTypeContext* ctx) const override { auto out_var_name = framework::GradVarName("W"); auto attr = ctx->GetAttr("is_sparse"); - bool is_sparse = BOOST_GET(bool, attr); + bool is_sparse = PADDLE_GET(bool, attr); if (is_sparse) { VLOG(3) << "lookup_table_grad op " << framework::GradVarName("W") << " is set to SelectedRows"; diff --git a/paddle/fluid/operators/lookup_table_v2_op.cc b/paddle/fluid/operators/lookup_table_v2_op.cc index 46054daf48ccc5..37da33b0a3d7c9 100644 --- a/paddle/fluid/operators/lookup_table_v2_op.cc +++ b/paddle/fluid/operators/lookup_table_v2_op.cc @@ -181,7 +181,7 @@ class LookupTableV2OpGradVarTypeInference : public framework::VarTypeInference { void operator()(framework::InferVarTypeContext* ctx) const override { auto out_var_name = framework::GradVarName("W"); auto attr = ctx->GetAttr("is_sparse"); - bool is_sparse = BOOST_GET(bool, attr); + bool is_sparse = PADDLE_GET(bool, attr); if (is_sparse) { VLOG(3) << "lookup_table_v2_grad op " << framework::GradVarName("W") << " is set to SelectedRows"; diff --git a/paddle/fluid/operators/match_matrix_tensor_op.cc b/paddle/fluid/operators/match_matrix_tensor_op.cc index 992d9e9f276c44..80313f156f1f2c 100644 --- a/paddle/fluid/operators/match_matrix_tensor_op.cc +++ b/paddle/fluid/operators/match_matrix_tensor_op.cc @@ -91,7 +91,7 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { int64_t tmp_dim_0 = -1; if (ctx->IsRuntime()) { framework::Variable* x_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); const auto& x_lod = x_var->Get().lod(); PADDLE_ENFORCE_EQ(x_lod.empty(), false, @@ -116,7 +116,7 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { x_dims[0])); framework::Variable* y_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); const auto& y_lod = y_var->Get().lod(); PADDLE_ENFORCE_EQ(y_lod.empty(), false, @@ -162,7 +162,7 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { } else { // compile time framework::VarDesc* x_desc = - BOOST_GET(framework::VarDesc*, ctx->GetInputVarPtrs("X")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetInputVarPtrs("X")[0]); PADDLE_ENFORCE_GE( x_desc->GetLoDLevel(), 1, @@ -170,7 +170,7 @@ void MatchMatrixTensorOP::InferShape(framework::InferShapeContext* ctx) const { "greater than 1, but reviced %d.", x_desc->GetLoDLevel())); framework::VarDesc* y_desc = - BOOST_GET(framework::VarDesc*, ctx->GetInputVarPtrs("Y")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetInputVarPtrs("Y")[0]); PADDLE_ENFORCE_GE( y_desc->GetLoDLevel(), 1, diff --git a/paddle/fluid/operators/nce_op.cc b/paddle/fluid/operators/nce_op.cc index bcc36e71cf4224..c9c4d1a4c74f33 100644 --- a/paddle/fluid/operators/nce_op.cc +++ b/paddle/fluid/operators/nce_op.cc @@ -323,7 +323,7 @@ class NCEOpGradVarTypeInference : public framework::VarTypeInference { auto weight_grad = framework::GradVarName("Weight"); auto attr = ctx->GetAttr("is_sparse"); - bool is_sparse = BOOST_GET(bool, attr); + bool is_sparse = PADDLE_GET(bool, attr); if (is_sparse) { VLOG(3) << "nce_op_grad op " << weight_grad << " and " << " is set to SelectedRows"; diff --git 
a/paddle/fluid/operators/prim_ops/add_p_op.cc b/paddle/fluid/operators/prim_ops/add_p_op.cc index 779f867ba6b717..ae95b16adb4040 100644 --- a/paddle/fluid/operators/prim_ops/add_p_op.cc +++ b/paddle/fluid/operators/prim_ops/add_p_op.cc @@ -57,8 +57,8 @@ class AddPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -82,7 +82,7 @@ class AddPrimOpShapeInference : public framework::InferShapeBase { y_shape[i])); } - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/broadcast_p_op.cc b/paddle/fluid/operators/prim_ops/broadcast_p_op.cc index d88eec565eebfb..6d4853cc5a4781 100644 --- a/paddle/fluid/operators/prim_ops/broadcast_p_op.cc +++ b/paddle/fluid/operators/prim_ops/broadcast_p_op.cc @@ -86,11 +86,11 @@ class BroadcastPrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); auto target_shape = ctx->Attrs().Get>("shape"); CheckShapeValid(x_shape, target_shape); - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(target_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(target_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/concat_p_op.cc b/paddle/fluid/operators/prim_ops/concat_p_op.cc index 71b463bd4427b3..9047679e8a62b2 100644 --- a/paddle/fluid/operators/prim_ops/concat_p_op.cc +++ b/paddle/fluid/operators/prim_ops/concat_p_op.cc @@ -59,13 +59,13 @@ class ConcatPrimOpShapeInference : public framework::InferShapeBase { auto axis = ctx->Attrs().Get("axis"); int64_t cnt_along_axis = 0; framework::VarDesc *first_x_var = - BOOST_GET(framework::VarDesc *, x_var_ptrs[0]); + PADDLE_GET(framework::VarDesc *, x_var_ptrs[0]); auto first_x_shape = first_x_var->GetShape(); cnt_along_axis += first_x_shape[axis]; size_t first_x_rank = first_x_shape.size(); for (size_t i = 1; i < x_var_ptrs.size(); ++i) { framework::VarDesc *x_var = - BOOST_GET(framework::VarDesc *, x_var_ptrs[i]); + PADDLE_GET(framework::VarDesc *, x_var_ptrs[i]); auto x_shape = x_var->GetShape(); cnt_along_axis += x_shape[axis]; size_t x_rank = x_shape.size(); @@ -97,7 +97,7 @@ class ConcatPrimOpShapeInference : public framework::InferShapeBase { std::vector y_shape(first_x_shape); y_shape[axis] = cnt_along_axis; - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(y_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(y_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/div_p_op.cc b/paddle/fluid/operators/prim_ops/div_p_op.cc index 5be3e47cc54c78..a20e46151e68ec 100644 --- a/paddle/fluid/operators/prim_ops/div_p_op.cc +++ 
b/paddle/fluid/operators/prim_ops/div_p_op.cc @@ -57,8 +57,8 @@ class DivPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -82,7 +82,7 @@ class DivPrimOpShapeInference : public framework::InferShapeBase { y_shape[i])); } - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/fill_constant_p_op.cc b/paddle/fluid/operators/prim_ops/fill_constant_p_op.cc index 20069d32db25fe..043d9ee26eb3d2 100644 --- a/paddle/fluid/operators/prim_ops/fill_constant_p_op.cc +++ b/paddle/fluid/operators/prim_ops/fill_constant_p_op.cc @@ -57,7 +57,7 @@ class FillConstantPrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; auto shape = ctx->Attrs().Get>("shape"); - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(shape); } }; @@ -67,7 +67,7 @@ class FillConstantPrimOpVarTypeInference void operator()(framework::InferVarTypeContext *ctx) const override { auto y_name = Output(ctx, "Y")[0]; auto data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); SetDataType(ctx, y_name, data_type); } }; diff --git a/paddle/fluid/operators/prim_ops/gather_p_op.cc b/paddle/fluid/operators/prim_ops/gather_p_op.cc index 405ef66b1147cd..cb8b7eee2ebc40 100644 --- a/paddle/fluid/operators/prim_ops/gather_p_op.cc +++ b/paddle/fluid/operators/prim_ops/gather_p_op.cc @@ -66,7 +66,7 @@ class GatherPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr index_var_ptr = ctx->GetInputVarPtrs("IndexTensor")[0]; framework::VarDesc *index_var = - BOOST_GET(framework::VarDesc *, index_var_ptr); + PADDLE_GET(framework::VarDesc *, index_var_ptr); auto index_shape = index_var->GetShape(); PADDLE_ENFORCE_EQ(index_shape.size(), 1, @@ -80,11 +80,11 @@ class GatherPrimOpShapeInference : public framework::InferShapeBase { } auto axis = ctx->Attrs().Get("axis"); - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); x_shape[axis] = num_index; - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/matmul_p_op.cc b/paddle/fluid/operators/prim_ops/matmul_p_op.cc index 2986a0dd40caa3..c1701c33a46d30 100644 --- a/paddle/fluid/operators/prim_ops/matmul_p_op.cc +++ b/paddle/fluid/operators/prim_ops/matmul_p_op.cc @@ -57,8 +57,8 @@ class MatmulPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; - 
framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -90,7 +90,7 @@ class MatmulPrimOpShapeInference : public framework::InferShapeBase { y_shape[y_rank - 2])); if (x_rank == 2) { std::vector z_shape{x_shape[x_rank - 2], y_shape[y_rank - 1]}; - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(z_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(z_shape); } else { PADDLE_ENFORCE_EQ(x_shape[0], y_shape[0], @@ -104,7 +104,7 @@ class MatmulPrimOpShapeInference : public framework::InferShapeBase { std::vector z_shape{ x_shape[0], x_shape[x_rank - 2], y_shape[y_rank - 1]}; - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(z_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(z_shape); } } }; diff --git a/paddle/fluid/operators/prim_ops/mul_p_op.cc b/paddle/fluid/operators/prim_ops/mul_p_op.cc index 34d996b6ceb332..bf59a0de875b6a 100644 --- a/paddle/fluid/operators/prim_ops/mul_p_op.cc +++ b/paddle/fluid/operators/prim_ops/mul_p_op.cc @@ -57,8 +57,8 @@ class MulPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -82,7 +82,7 @@ class MulPrimOpShapeInference : public framework::InferShapeBase { y_shape[i])); } - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/reduce_p_op.cc b/paddle/fluid/operators/prim_ops/reduce_p_op.cc index 4e4b1d391c3c6f..3c18ce46f9d937 100644 --- a/paddle/fluid/operators/prim_ops/reduce_p_op.cc +++ b/paddle/fluid/operators/prim_ops/reduce_p_op.cc @@ -63,7 +63,7 @@ class ReducePrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); auto axis = ctx->Attrs().Get>("axis"); auto keepdim = ctx->Attrs().Get("keepdim"); @@ -83,7 +83,7 @@ class ReducePrimOpShapeInference : public framework::InferShapeBase { x_shape.push_back(1); } - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/reshape_p_op.cc b/paddle/fluid/operators/prim_ops/reshape_p_op.cc index 7854be6d60f854..7413b9ece2c0b6 100644 --- a/paddle/fluid/operators/prim_ops/reshape_p_op.cc +++ b/paddle/fluid/operators/prim_ops/reshape_p_op.cc @@ -64,7 +64,7 @@ class 
ReshapePrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); auto shape = ctx->Attrs().Get>("shape"); PADDLE_ENFORCE_EQ(product(x_shape), @@ -75,7 +75,7 @@ class ReshapePrimOpShapeInference : public framework::InferShapeBase { "contains %d elements", product(x_shape), product(shape))); - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(shape); } }; diff --git a/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc b/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc index 54d9d61318ae58..4120de1adc7c64 100644 --- a/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc +++ b/paddle/fluid/operators/prim_ops/scatter_add_p_op.cc @@ -70,7 +70,7 @@ class ScatterAddPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr index_var_ptr = ctx->GetInputVarPtrs("IndexTensor")[0]; framework::VarDesc *index_var = - BOOST_GET(framework::VarDesc *, index_var_ptr); + PADDLE_GET(framework::VarDesc *, index_var_ptr); auto index_shape = index_var->GetShape(); PADDLE_ENFORCE_EQ(index_shape.size(), 1, @@ -83,8 +83,8 @@ class ScatterAddPrimOpShapeInference : public framework::InferShapeBase { num_index = ctx->Attrs().Get>("index").size(); } auto axis = ctx->Attrs().Get("axis"); - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -118,7 +118,7 @@ class ScatterAddPrimOpShapeInference : public framework::InferShapeBase { } } - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/slice_assign_p_op.cc b/paddle/fluid/operators/prim_ops/slice_assign_p_op.cc index 3ff07e0ba02dca..c9ee3fb5e5d3e5 100644 --- a/paddle/fluid/operators/prim_ops/slice_assign_p_op.cc +++ b/paddle/fluid/operators/prim_ops/slice_assign_p_op.cc @@ -66,8 +66,8 @@ class SliceAssignPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -123,7 +123,7 @@ class SliceAssignPrimOpShapeInference : public framework::InferShapeBase { y_target_shape[i], y_shape[i])); } - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); } }; diff 
--git a/paddle/fluid/operators/prim_ops/slice_select_p_op.cc b/paddle/fluid/operators/prim_ops/slice_select_p_op.cc index b562c9549ecb9e..79b870184e07fc 100644 --- a/paddle/fluid/operators/prim_ops/slice_select_p_op.cc +++ b/paddle/fluid/operators/prim_ops/slice_select_p_op.cc @@ -64,7 +64,7 @@ class SliceSelectPrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); auto axis = ctx->Attrs().Get>("axis"); auto starts = ctx->Attrs().Get>("starts"); @@ -97,7 +97,7 @@ class SliceSelectPrimOpShapeInference : public framework::InferShapeBase { for (size_t i = 0; i < axis.size(); ++i) { x_shape[axis[i]] = (ends[i] - starts[i] + strides[i] - 1) / strides[i]; } - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/split_p_op.cc b/paddle/fluid/operators/prim_ops/split_p_op.cc index 24805cc1529866..4ad9d82467d28e 100644 --- a/paddle/fluid/operators/prim_ops/split_p_op.cc +++ b/paddle/fluid/operators/prim_ops/split_p_op.cc @@ -65,7 +65,7 @@ class SplitPrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; auto y_var_ptrs = ctx->GetOutputVarPtrs("YS"); - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); auto axis = ctx->Attrs().Get("axis"); auto num_or_sections = @@ -81,14 +81,14 @@ class SplitPrimOpShapeInference : public framework::InferShapeBase { axis)); y_shape[axis] = x_shape[axis] / num_or_sections[0]; for (size_t i = 0; i < size_t(num_or_sections[0]); ++i) { - BOOST_GET(framework::VarDesc *, y_var_ptrs[i])->SetShape(y_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptrs[i])->SetShape(y_shape); } } else { int64_t cnt_along_axis = 0; for (size_t i = 0; i < num_or_sections.size(); ++i) { y_shape[axis] = num_or_sections[i]; cnt_along_axis += num_or_sections[i]; - BOOST_GET(framework::VarDesc *, y_var_ptrs[i])->SetShape(y_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptrs[i])->SetShape(y_shape); } PADDLE_ENFORCE_EQ( x_shape[axis], diff --git a/paddle/fluid/operators/prim_ops/sqrt_p_op.cc b/paddle/fluid/operators/prim_ops/sqrt_p_op.cc index d0f5a3852db226..2540b39a25696e 100644 --- a/paddle/fluid/operators/prim_ops/sqrt_p_op.cc +++ b/paddle/fluid/operators/prim_ops/sqrt_p_op.cc @@ -55,9 +55,9 @@ class SqrtPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_var->GetShape()); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_var->GetShape()); } }; diff --git a/paddle/fluid/operators/prim_ops/sub_p_op.cc b/paddle/fluid/operators/prim_ops/sub_p_op.cc index 
6d701b10e960cf..35369192737999 100644 --- a/paddle/fluid/operators/prim_ops/sub_p_op.cc +++ b/paddle/fluid/operators/prim_ops/sub_p_op.cc @@ -57,8 +57,8 @@ class SubPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr y_var_ptr = ctx->GetInputVarPtrs("Y")[0]; framework::InferShapeVarPtr z_var_ptr = ctx->GetOutputVarPtrs("Z")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); - framework::VarDesc *y_var = BOOST_GET(framework::VarDesc *, y_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *y_var = PADDLE_GET(framework::VarDesc *, y_var_ptr); auto x_shape = x_var->GetShape(); auto y_shape = y_var->GetShape(); size_t x_rank = x_shape.size(); @@ -82,7 +82,7 @@ class SubPrimOpShapeInference : public framework::InferShapeBase { y_shape[i])); } - BOOST_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); + PADDLE_GET(framework::VarDesc *, z_var_ptr)->SetShape(x_shape); } }; diff --git a/paddle/fluid/operators/prim_ops/tanh_p_op.cc b/paddle/fluid/operators/prim_ops/tanh_p_op.cc index e1015217cf600b..97695c9618f47e 100644 --- a/paddle/fluid/operators/prim_ops/tanh_p_op.cc +++ b/paddle/fluid/operators/prim_ops/tanh_p_op.cc @@ -55,9 +55,9 @@ class TanhPrimOpShapeInference : public framework::InferShapeBase { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_var->GetShape()); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(x_var->GetShape()); } }; diff --git a/paddle/fluid/operators/prim_ops/transpose_p_op.cc b/paddle/fluid/operators/prim_ops/transpose_p_op.cc index 9a5635617164fe..6c907be7799fbf 100644 --- a/paddle/fluid/operators/prim_ops/transpose_p_op.cc +++ b/paddle/fluid/operators/prim_ops/transpose_p_op.cc @@ -56,7 +56,7 @@ class TransposePrimOpShapeInference : public framework::InferShapeBase { void operator()(framework::InferShapeContext *ctx) const override { framework::InferShapeVarPtr x_var_ptr = ctx->GetInputVarPtrs("X")[0]; framework::InferShapeVarPtr y_var_ptr = ctx->GetOutputVarPtrs("Y")[0]; - framework::VarDesc *x_var = BOOST_GET(framework::VarDesc *, x_var_ptr); + framework::VarDesc *x_var = PADDLE_GET(framework::VarDesc *, x_var_ptr); auto x_shape = x_var->GetShape(); auto axis = ctx->Attrs().Get>("axis"); size_t x_rank = x_shape.size(); @@ -101,7 +101,7 @@ class TransposePrimOpShapeInference : public framework::InferShapeBase { for (size_t i = 0; i < axis_size; i++) { y_shape[i] = x_shape[axis[i]]; } - BOOST_GET(framework::VarDesc *, y_var_ptr)->SetShape(y_shape); + PADDLE_GET(framework::VarDesc *, y_var_ptr)->SetShape(y_shape); } }; diff --git a/paddle/fluid/operators/py_func_op.cc b/paddle/fluid/operators/py_func_op.cc index 28821b6a745868..df3e96e531c181 100644 --- a/paddle/fluid/operators/py_func_op.cc +++ b/paddle/fluid/operators/py_func_op.cc @@ -144,11 +144,11 @@ class PyFuncOpVarTypeInference : public framework::StaticGraphVarTypeInference { has_out)); PADDLE_ENFORCE_GE( - BOOST_GET_CONST(int, ctx->GetAttr(kForwardPythonCallableId)), + PADDLE_GET_CONST(int, ctx->GetAttr(kForwardPythonCallableId)), 0, platform::errors::InvalidArgument( "Function id cannot be less than 0, but received value is %d.", - BOOST_GET_CONST(int, 
ctx->GetAttr(kForwardPythonCallableId)))); + PADDLE_GET_CONST(int, ctx->GetAttr(kForwardPythonCallableId)))); if (!has_out) return; @@ -241,7 +241,7 @@ class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase { std::vector> operator()() const override { auto &fwd_attrs = Attrs(); // no backward op when backward_id is less than 0 - if (BOOST_GET_CONST(int, fwd_attrs.at(kBackwardPythonCallableId)) < 0) { + if (PADDLE_GET_CONST(int, fwd_attrs.at(kBackwardPythonCallableId)) < 0) { return {}; } @@ -261,7 +261,7 @@ class PyFuncOpGradDescMaker : public framework::GradOpDescMakerBase { // For memory reused, some inputs/output in forward part may be not needed // in backward part. Skipping these vars helps to save memory - auto &backward_skip_var_list = BOOST_GET_CONST( + auto &backward_skip_var_list = PADDLE_GET_CONST( std::vector, fwd_attrs.at(kPyFuncBackwardSkipVars)); std::unordered_set backward_skip_var_set( backward_skip_var_list.begin(), backward_skip_var_list.end()); diff --git a/paddle/fluid/operators/randperm_op.cc b/paddle/fluid/operators/randperm_op.cc index a8b9f900ce79a0..565707853e22c5 100644 --- a/paddle/fluid/operators/randperm_op.cc +++ b/paddle/fluid/operators/randperm_op.cc @@ -12,6 +12,8 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ +#include "paddle/fluid/operators/randperm_op.h" + #include #include "paddle/fluid/framework/op_registry.h" @@ -79,7 +81,7 @@ class RandpermOpVarTypeInference : public framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext *ctx) const override { auto var_data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); ctx->SetOutputDataType("Out", var_data_type); } }; diff --git a/paddle/fluid/operators/reader/create_custom_reader_op.cc b/paddle/fluid/operators/reader/create_custom_reader_op.cc index b7057ea77e40fd..5285d14ec7d539 100644 --- a/paddle/fluid/operators/reader/create_custom_reader_op.cc +++ b/paddle/fluid/operators/reader/create_custom_reader_op.cc @@ -123,7 +123,7 @@ class CustomReaderInferShape : public framework::InferShapeBase { res_lod_levels.push_back(sink_var->GetLoDLevel()); } auto* out_reader = - BOOST_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); out_reader->SetShapes(res_dims); out_reader->SetLoDLevels(res_lod_levels); } @@ -139,10 +139,10 @@ class CustomReaderInferVarType : public framework::VarTypeInference { "The output reader variable should not be null.")); ctx->SetType(out_var_name, framework::proto::VarType::READER); - auto sink_var_names = BOOST_GET_CONST(std::vector, - ctx->GetAttr("sink_var_names")); + auto sink_var_names = PADDLE_GET_CONST(std::vector, + ctx->GetAttr("sink_var_names")); const auto* sub_block = - BOOST_GET_CONST(framework::BlockDesc*, ctx->GetAttr("sub_block")); + PADDLE_GET_CONST(framework::BlockDesc*, ctx->GetAttr("sub_block")); std::vector res_data_types; for (const std::string& var_name : sink_var_names) { framework::VarDesc* var = sub_block->FindVar(var_name); diff --git a/paddle/fluid/operators/reader/read_op.cc b/paddle/fluid/operators/reader/read_op.cc index 95f44579dcea56..deb0e4a49337f9 100644 --- a/paddle/fluid/operators/reader/read_op.cc +++ b/paddle/fluid/operators/reader/read_op.cc @@ -55,7 +55,7 @@ class ReadInferShape : public framework::InferShapeBase { "The reader's dim number 
doesn't match the output number.")); ctx->SetOutputsDim("Out", reader_dims); auto in_desc = - BOOST_GET(framework::VarDesc*, ctx->GetInputVarPtrs("Reader")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetInputVarPtrs("Reader")[0]); auto in_lod_levels = in_desc->GetLoDLevels(); auto out_var_ptrs = ctx->GetOutputVarPtrs("Out"); PADDLE_ENFORCE_EQ( @@ -65,7 +65,7 @@ class ReadInferShape : public framework::InferShapeBase { "LoDLevels of Input(Reader) must be the same as the " "number of Outputs(Out).")); for (size_t i = 0; i < out_var_ptrs.size(); ++i) { - auto* out_desc = BOOST_GET(framework::VarDesc*, out_var_ptrs[i]); + auto* out_desc = PADDLE_GET(framework::VarDesc*, out_var_ptrs[i]); out_desc->SetLoDLevel(in_lod_levels[i]); } } @@ -75,7 +75,7 @@ class ReadInferShape : public framework::InferShapeBase { class ReadInferVarType : public framework::StaticGraphVarTypeInference { public: void operator()(framework::InferVarTypeContext* ctx) const override { - bool infer_out = BOOST_GET_CONST(bool, ctx->GetAttr("infer_out")); + bool infer_out = PADDLE_GET_CONST(bool, ctx->GetAttr("infer_out")); if (infer_out) { std::string reader_name = Input(ctx, "Reader")[0]; auto& out_names = Output(ctx, "Out"); diff --git a/paddle/fluid/operators/reader/reader_op_registry.cc b/paddle/fluid/operators/reader/reader_op_registry.cc index 0bf35448ab8b38..ff9d4260230dd6 100644 --- a/paddle/fluid/operators/reader/reader_op_registry.cc +++ b/paddle/fluid/operators/reader/reader_op_registry.cc @@ -112,7 +112,7 @@ void FileReaderInferShape::operator()(framework::InferShapeContext* ctx) const { need_check_feed.size(), shapes.size())); framework::VarDesc* reader = - BOOST_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); reader->SetLoDLevels(lod_levels); } } @@ -141,10 +141,10 @@ void DecoratedReaderInferShape::operator()( "The output decorated reader should not be null.")); ctx->SetReaderDims("Out", ctx->GetReaderDims("UnderlyingReader")); - framework::VarDesc* in_reader = BOOST_GET( + framework::VarDesc* in_reader = PADDLE_GET( framework::VarDesc*, ctx->GetInputVarPtrs("UnderlyingReader")[0]); framework::VarDesc* out_reader = - BOOST_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); + PADDLE_GET(framework::VarDesc*, ctx->GetOutputVarPtrs("Out")[0]); out_reader->SetLoDLevels(in_reader->GetLoDLevels()); } diff --git a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc index d072dcfa5eb94d..7f5a174952e81b 100644 --- a/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc +++ b/paddle/fluid/operators/reduce_ops/reduce_sum_op.cc @@ -83,7 +83,7 @@ class ReduceSumVarTypeInference : public paddle::framework::VarTypeInference { public: void operator()(paddle::framework::InferVarTypeContext* ctx) const override { auto data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("out_dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("out_dtype"))); if (data_type >= 0) { ctx->SetOutputDataType("Out", data_type); } else { diff --git a/paddle/fluid/operators/run_program_op.cc b/paddle/fluid/operators/run_program_op.cc index fd400d29136701..94da2b2b35ba0d 100644 --- a/paddle/fluid/operators/run_program_op.cc +++ b/paddle/fluid/operators/run_program_op.cc @@ -212,7 +212,7 @@ class RunProgramGradOpMaker : public framework::SingleGradOpMaker { grad_op->SetOutput(framework::GradVarName("X"), this->InputGrad("X")); auto block_desc = - BOOST_GET_CONST(BlockDesc*, this->GetAttr("global_block")); + 
PADDLE_GET_CONST(BlockDesc*, this->GetAttr("global_block")); auto params_grad = this->InputGrad("Params"); FilterHelper::filter(block_desc, ¶ms_grad); // filter the vector. grad_op->SetOutput(framework::GradVarName("Params"), params_grad); diff --git a/paddle/fluid/operators/segment_pool_op.cc b/paddle/fluid/operators/segment_pool_op.cc index 751a3050f68c6b..2cdc5746614bbe 100644 --- a/paddle/fluid/operators/segment_pool_op.cc +++ b/paddle/fluid/operators/segment_pool_op.cc @@ -132,7 +132,7 @@ class SegmentPoolGradOpMaker : public framework::SingleGradOpMaker { op_desc_ptr->SetInput("X", this->Input("X")); op_desc_ptr->SetInput("SegmentIds", this->Input("SegmentIds")); op_desc_ptr->SetInput("Out", this->Output("Out")); - if (BOOST_GET_CONST(std::string, this->GetAttr("pooltype")) == "MEAN") { + if (PADDLE_GET_CONST(std::string, this->GetAttr("pooltype")) == "MEAN") { op_desc_ptr->SetInput("SummedIds", this->Output("SummedIds")); } op_desc_ptr->SetInput(framework::GradVarName("Out"), diff --git a/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc index f1350ce334b418..7056c52cd8ba8c 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_conv_op.cc @@ -222,7 +222,7 @@ class SequenceConvGradOpMaker : public framework::SingleGradOpMaker { op->SetAttrMap(this->Attrs()); if (op->HasAttr("paddingTrainable") && - BOOST_GET_CONST(bool, op->GetAttr("paddingTrainable")) && + PADDLE_GET_CONST(bool, op->GetAttr("paddingTrainable")) && this->HasInput("PaddingData")) { op->SetInput("PaddingData", this->Input("PaddingData")); op->SetOutput(framework::GradVarName("PaddingData"), @@ -246,7 +246,7 @@ class SequenceConvGradNoNeedBufferVarsInference const std::unordered_set &operator()( const framework::InferNoNeedBufferVarsContext &ctx) const final { static const std::unordered_set kPaddingData({"PaddingData"}); - if (!BOOST_GET_CONST(bool, ctx.GetAttr("paddingTrainable"))) { + if (!PADDLE_GET_CONST(bool, ctx.GetAttr("paddingTrainable"))) { return kPaddingData; } else { return Empty(); diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc index 5c3731fc90253f..80c1bac029738e 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_as_op.cc @@ -45,9 +45,9 @@ class SequenceExpandAsOp : public framework::OperatorWithKernel { if (ctx->IsRuntime()) { framework::Variable* x_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); framework::Variable* y_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); auto& x_dim = x_var->Get().dims(); auto& y_lod = y_var->Get().lod(); diff --git a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc index a2fb088975e39e..1484468c6979a4 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_expand_op.cc @@ -46,9 +46,9 @@ class SequenceExpandOp : public framework::OperatorWithKernel { if (ctx->IsRuntime()) { framework::Variable* x_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); framework::Variable* y_var = - 
BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("Y")[0]); auto& x_lod = x_var->Get().lod(); auto& y_lod = y_var->Get().lod(); diff --git a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc index ad4876970c532a..e5c84d45d55e91 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_pad_op.cc @@ -68,7 +68,7 @@ class SequencePadOp : public framework::OperatorWithKernel { if (ctx->IsRuntime()) { // run time framework::Variable* x_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); const auto& x_lod = x_var->Get().lod(); PADDLE_ENFORCE_EQ(x_lod.empty(), false, diff --git a/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc index 6c146a699af8b8..9b8697b976633a 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_pool_op.cc @@ -168,7 +168,7 @@ class SequencePoolGradOpMaker : public framework::SingleGradOpMaker { void Apply(GradOpPtr op_desc_ptr) const override { op_desc_ptr->SetType("sequence_pool_grad"); op_desc_ptr->SetInput("X", this->Input("X")); - if (BOOST_GET_CONST(std::string, this->GetAttr("pooltype")) == "MAX") { + if (PADDLE_GET_CONST(std::string, this->GetAttr("pooltype")) == "MAX") { op_desc_ptr->SetInput("MaxIndex", this->Output("MaxIndex")); } op_desc_ptr->SetInput(framework::GradVarName("Out"), diff --git a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc index 5861334dce0a97..1d53c39713acf4 100644 --- a/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc +++ b/paddle/fluid/operators/sequence_ops/sequence_scatter_op.cc @@ -99,9 +99,9 @@ class SequenceScatterOp : public framework::OperatorWithKernel { // Enforce LoD of ids and updates be the same if (ctx->IsRuntime()) { framework::Variable* ids_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("Ids")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("Ids")[0]); framework::Variable* updates_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("Updates")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("Updates")[0]); auto& ids_lod = ids_var->Get().lod(); auto& updates_lod = updates_var->Get().lod(); diff --git a/paddle/fluid/operators/sync_batch_norm_op.cc b/paddle/fluid/operators/sync_batch_norm_op.cc index d2a4d173ad78fc..2fc8268f710868 100644 --- a/paddle/fluid/operators/sync_batch_norm_op.cc +++ b/paddle/fluid/operators/sync_batch_norm_op.cc @@ -33,7 +33,7 @@ class SyncBatchNormGradMaker : public framework::SingleGradOpMaker { op->SetInput("SavedVariance", this->Output("SavedVariance")); // used when setting use_global_stats True during training - if (BOOST_GET_CONST(bool, this->GetAttr("use_global_stats"))) { + if (PADDLE_GET_CONST(bool, this->GetAttr("use_global_stats"))) { op->SetInput("Mean", this->Output("MeanOut")); op->SetInput("Variance", this->Output("VarianceOut")); } diff --git a/paddle/fluid/operators/uniform_random_op.cc b/paddle/fluid/operators/uniform_random_op.cc index 234eecbd9aa0e6..2fda636e8809bc 100644 --- a/paddle/fluid/operators/uniform_random_op.cc +++ b/paddle/fluid/operators/uniform_random_op.cc @@ -208,7 +208,7 @@ class UniformRandomOpVarTypeInference : public 
framework::VarTypeInference { public: void operator()(framework::InferVarTypeContext *ctx) const override { auto var_data_type = static_cast( - BOOST_GET_CONST(int, ctx->GetAttr("dtype"))); + PADDLE_GET_CONST(int, ctx->GetAttr("dtype"))); if (ctx->GetOutputType("Out") != framework::proto::VarType::SELECTED_ROWS) { ctx->SetOutputType("Out", framework::proto::VarType::LOD_TENSOR); diff --git a/paddle/fluid/operators/var_conv_2d_op.cc b/paddle/fluid/operators/var_conv_2d_op.cc index 53feefef3e1cc6..3aec6b8356877a 100644 --- a/paddle/fluid/operators/var_conv_2d_op.cc +++ b/paddle/fluid/operators/var_conv_2d_op.cc @@ -124,7 +124,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const { if (ctx->IsRuntime()) { framework::Variable* x_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("X")[0]); const auto& x_lod = x_var->Get().lod(); PADDLE_ENFORCE_EQ( !x_lod.empty(), @@ -145,7 +145,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const { x_dims)); framework::Variable* row_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("ROW")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("ROW")[0]); const auto& row_lod = row_var->Get().lod(); PADDLE_ENFORCE_EQ(!row_lod.empty(), true, @@ -154,7 +154,7 @@ void VarConv2dOP::InferShape(framework::InferShapeContext* ctx) const { "contain LoD information.")); framework::Variable* col_var = - BOOST_GET(framework::Variable*, ctx->GetInputVarPtrs("COLUMN")[0]); + PADDLE_GET(framework::Variable*, ctx->GetInputVarPtrs("COLUMN")[0]); const auto& col_lod = col_var->Get().lod(); PADDLE_ENFORCE_EQ(!col_lod.empty(), true, diff --git a/paddle/fluid/platform/device/ipu/ipu_compiler.cc b/paddle/fluid/platform/device/ipu/ipu_compiler.cc index 09e68ab5187467..a113bbbe26579e 100644 --- a/paddle/fluid/platform/device/ipu/ipu_compiler.cc +++ b/paddle/fluid/platform/device/ipu/ipu_compiler.cc @@ -200,7 +200,7 @@ popart::DataType DataTypeFromStr(const std::string& str) { template T GetAttrAllowNull(std::string attr, OpDesc* op_desc) { if (op_desc->HasAttr(attr)) { - return BOOST_GET_CONST(T, op_desc->GetAttr(attr)); + return PADDLE_GET_CONST(T, op_desc->GetAttr(attr)); } else { return {}; } @@ -209,7 +209,7 @@ T GetAttrAllowNull(std::string attr, OpDesc* op_desc) { template nonstd::optional GetOptAttrAllowNull(std::string attr, OpDesc* op_desc) { if (op_desc->HasAttr(attr)) { - return BOOST_GET_CONST(T, op_desc->GetAttr(attr)); + return PADDLE_GET_CONST(T, op_desc->GetAttr(attr)); } else { return {}; } @@ -218,7 +218,7 @@ nonstd::optional GetOptAttrAllowNull(std::string attr, OpDesc* op_desc) { template TO GetCastSigAttrAllowNull(std::string attr, OpDesc* op_desc) { if (op_desc->HasAttr(attr)) { - auto x = BOOST_GET_CONST(TI, op_desc->GetAttr(attr)); + auto x = PADDLE_GET_CONST(TI, op_desc->GetAttr(attr)); return static_cast(x); } else { return {}; @@ -241,7 +241,7 @@ struct NameScopeHelper { NameScopeHelper::NameScopeHelper(const OpDesc* op, popart::Builder* builder) : builder_(builder) { - auto op_namescope = BOOST_GET_CONST(std::string, op->GetAttr(sOpNamescope)); + auto op_namescope = PADDLE_GET_CONST(std::string, op->GetAttr(sOpNamescope)); if (op_namescope.empty() || op_namescope == "/") { return; } @@ -388,8 +388,8 @@ void Compiler::LowerConstants(const Scope* scope) { auto op_type = op_desc->Type(); if (op_type == "popart_constant") { auto shape = - BOOST_GET_CONST(std::vector, op_desc->GetAttr("dims")); - auto dtype_ = BOOST_GET_CONST(int, 
op_desc->GetAttr("dtype")); + PADDLE_GET_CONST(std::vector, op_desc->GetAttr("dims")); + auto dtype_ = PADDLE_GET_CONST(int, op_desc->GetAttr("dtype")); auto dtype = PopartDType2VarType( OnnxDType2PopartType(static_cast(dtype_))); auto tensor_name = GetOpOutputs(op_desc).front(); @@ -487,7 +487,7 @@ void Compiler::LowerBody() { paddle::visit(visitor, attr.second); } auto __op_type = - BOOST_GET_CONST(std::string, op_desc->GetAttr("__op_type")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("__op_type")); VLOG(10) << "Build graph from custom op: " << __op_type; auto it = custom_ops_.find(__op_type); NameScopeHelper ns_helper(op_desc, builder_.get()); @@ -502,8 +502,8 @@ void Compiler::LowerBody() { auto inputs = GetOpInputs(op_desc); auto debug_context = BuildDebugContext(op_desc); auto print_gradient = - BOOST_GET_CONST(int64_t, op_desc->GetAttr("print_gradient")); - auto title = BOOST_GET_CONST(std::string, op_desc->GetAttr("title")); + PADDLE_GET_CONST(int64_t, op_desc->GetAttr("print_gradient")); + auto title = PADDLE_GET_CONST(std::string, op_desc->GetAttr("title")); NameScopeHelper ns_helper(op_desc, builder_.get()); auto output_ids = builder_->aiGraphcoreOpset1().printtensor( inputs, print_gradient, debug_context, title); @@ -529,21 +529,21 @@ void Compiler::LowerOptimizer(const Scope* scope) { auto op_type = op_desc->Type(); if (op_type == "popart_optimizer") { auto raw_type = - BOOST_GET_CONST(std::string, op_desc->GetAttr("raw_type")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("raw_type")); resources_->optimizer_type = raw_type; resources_->with_lr_sched = - BOOST_GET_CONST(bool, op_desc->GetAttr("with_lr_sched")); + PADDLE_GET_CONST(bool, op_desc->GetAttr("with_lr_sched")); if (ipu_strategy_->is_dynamic) { // loss_var in dy2static is set by identity_loss. And lr is // passed by ipu_strategy. 
resources_->lr = ipu_strategy_->lr; } else { auto loss_var = - BOOST_GET_CONST(std::string, op_desc->GetAttr("loss_var")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("loss_var")); resources_->loss_var = resources_->tensors[loss_var]; if (op_desc->HasAttr("lr_var")) { auto lr_var = - BOOST_GET_CONST(std::string, op_desc->GetAttr("lr_var")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("lr_var")); resources_->lr_var = lr_var; resources_->lr = GetSingleVarFromScope(scope, lr_var); } else { @@ -555,16 +555,16 @@ void Compiler::LowerOptimizer(const Scope* scope) { VLOG(10) << "Set initial lr: " << resources_->lr; // Get the type of optimizer - auto type = BOOST_GET_CONST(std::string, op_desc->GetAttr("type")); + auto type = PADDLE_GET_CONST(std::string, op_desc->GetAttr("type")); // Set weight decay by tensor names for Lamb - auto weight_decay_vars = BOOST_GET_CONST( + auto weight_decay_vars = PADDLE_GET_CONST( std::vector, op_desc->GetAttr("weight_decay_vars")); - auto weight_decay_values = BOOST_GET_CONST( + auto weight_decay_values = PADDLE_GET_CONST( std::vector, op_desc->GetAttr("weight_decay_values")); // Get the maximum permissible value for gradient clipping std::vector clip_norm_settings = {}; if (op_desc->HasAttr("clip_norm")) { - auto clip_norm = BOOST_GET_CONST(float, op_desc->GetAttr("clip_norm")); + auto clip_norm = PADDLE_GET_CONST(float, op_desc->GetAttr("clip_norm")); clip_norm_settings.push_back( popart::ClipNormSettings::clipAllWeights(clip_norm)); VLOG(10) << "Set the global gradient clipping with the maximum " @@ -580,8 +580,8 @@ void Compiler::LowerOptimizer(const Scope* scope) { if (type == "sgd") { auto weight_decay = - BOOST_GET_CONST(float, op_desc->GetAttr("weight_decay")); - auto momentum = BOOST_GET_CONST(float, op_desc->GetAttr("momentum")); + PADDLE_GET_CONST(float, op_desc->GetAttr("weight_decay")); + auto momentum = PADDLE_GET_CONST(float, op_desc->GetAttr("momentum")); resources_->optimizer_fn = [=](float lr) { return std::make_unique( popart::OptimizerValue(lr, false), @@ -602,20 +602,20 @@ void Compiler::LowerOptimizer(const Scope* scope) { clip_norm_settings); } else if (type == "adam") { auto weight_decay = - BOOST_GET_CONST(float, op_desc->GetAttr("weight_decay")); - auto beta1 = BOOST_GET_CONST(float, op_desc->GetAttr("beta1")); - auto beta2 = BOOST_GET_CONST(float, op_desc->GetAttr("beta2")); - auto eps = BOOST_GET_CONST(float, op_desc->GetAttr("eps")); + PADDLE_GET_CONST(float, op_desc->GetAttr("weight_decay")); + auto beta1 = PADDLE_GET_CONST(float, op_desc->GetAttr("beta1")); + auto beta2 = PADDLE_GET_CONST(float, op_desc->GetAttr("beta2")); + auto eps = PADDLE_GET_CONST(float, op_desc->GetAttr("eps")); auto mwn = ipu_strategy_->max_weight_norm; VLOG(10) << "set max_weight_norm: " << mwn; auto adam_mode_ = - BOOST_GET_CONST(std::string, op_desc->GetAttr("adam_mode")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("adam_mode")); auto adam_mode = AdamModeFromStr(adam_mode_, ipu_strategy_->use_no_bias_optimizer); auto weight_decay_mode_ = ipu_strategy_->weight_decay_mode; auto scaled_optimizer_state_ = ipu_strategy_->scaled_optimizer_state; if (weight_decay_mode_.empty()) { - weight_decay_mode_ = BOOST_GET_CONST( + weight_decay_mode_ = PADDLE_GET_CONST( std::string, op_desc->GetAttr("weight_decay_mode")); } auto weight_decay_mode = WeightDecayModeFromStr(weight_decay_mode_); @@ -726,17 +726,17 @@ void Compiler::LowerOptimizer(const Scope* scope) { scaled_optimizer_state_); } } else if (type == "adaptive") { - auto alpha = 
BOOST_GET_CONST(float, op_desc->GetAttr("alpha")); - auto momentum = BOOST_GET_CONST(float, op_desc->GetAttr("momentum")); - auto eps = BOOST_GET_CONST(float, op_desc->GetAttr("eps")); + auto alpha = PADDLE_GET_CONST(float, op_desc->GetAttr("alpha")); + auto momentum = PADDLE_GET_CONST(float, op_desc->GetAttr("momentum")); + auto eps = PADDLE_GET_CONST(float, op_desc->GetAttr("eps")); auto weight_decay = - BOOST_GET_CONST(float, op_desc->GetAttr("weight_decay")); + PADDLE_GET_CONST(float, op_desc->GetAttr("weight_decay")); auto adaptive_mode_ = - BOOST_GET_CONST(std::string, op_desc->GetAttr("adaptive_mode")); + PADDLE_GET_CONST(std::string, op_desc->GetAttr("adaptive_mode")); auto adaptive_mode = AdaptiveModeFromStr(adaptive_mode_); auto weight_decay_mode_ = ipu_strategy_->weight_decay_mode; if (weight_decay_mode_.empty()) { - weight_decay_mode_ = BOOST_GET_CONST( + weight_decay_mode_ = PADDLE_GET_CONST( std::string, op_desc->GetAttr("weight_decay_mode")); } auto weight_decay_mode = WeightDecayModeFromStr(weight_decay_mode_); @@ -797,12 +797,12 @@ void Compiler::PostLower(const std::vector& tensor_ids, auto tensor_ids_set = std::set(tensor_ids.begin(), tensor_ids.end()); if (op_desc->HasAttr(sIpuIndexAttr)) { - auto ipu_index = BOOST_GET_CONST(int, op_desc->GetAttr(sIpuIndexAttr)); + auto ipu_index = PADDLE_GET_CONST(int, op_desc->GetAttr(sIpuIndexAttr)); builder_->virtualGraph(tensor_ids_set, ipu_index); VLOG(10) << "set " << sIpuIndexAttr << " = " << ipu_index << " for op: " << op_desc->Type(); if (op_desc->HasAttr(sIpuStageAttr)) { - auto ipu_stage = BOOST_GET_CONST(int, op_desc->GetAttr(sIpuStageAttr)); + auto ipu_stage = PADDLE_GET_CONST(int, op_desc->GetAttr(sIpuStageAttr)); builder_->pipelineStage(tensor_ids_set, ipu_stage); VLOG(10) << "set " << sIpuStageAttr << " = " << ipu_stage << " for op: " << op_desc->Type(); @@ -838,12 +838,12 @@ void Compiler::PostLower(const std::string& tensor_id, bool skip_pipline) { // Set pipline if (!skip_pipline && op_desc->HasAttr(sIpuIndexAttr)) { - auto ipu_index = BOOST_GET_CONST(int, op_desc->GetAttr(sIpuIndexAttr)); + auto ipu_index = PADDLE_GET_CONST(int, op_desc->GetAttr(sIpuIndexAttr)); builder_->virtualGraph(tensor_id, ipu_index); VLOG(10) << "set " << sIpuIndexAttr << " = " << ipu_index << " for op: " << op_desc->Type(); if (op_desc->HasAttr(sIpuStageAttr)) { - auto ipu_stage = BOOST_GET_CONST(int, op_desc->GetAttr(sIpuStageAttr)); + auto ipu_stage = PADDLE_GET_CONST(int, op_desc->GetAttr(sIpuStageAttr)); builder_->pipelineStage(tensor_id, ipu_stage); VLOG(10) << "set " << sIpuStageAttr << " = " << ipu_stage << " for op: " << op_desc->Type(); @@ -864,7 +864,8 @@ void Compiler::PostLower(const std::string& tensor_id, } } else { if (op_desc->HasAttr(sAvailMemAttribute)) { - auto amp = BOOST_GET_CONST(float, op_desc->GetAttr(sAvailMemAttribute)); + auto amp = + PADDLE_GET_CONST(float, op_desc->GetAttr(sAvailMemAttribute)); if (amp < 0.0f || amp > 1.0) { PADDLE_THROW(platform::errors::InvalidArgument( "AvailableMemoryProportion %f is invalid, which should be in " @@ -881,11 +882,11 @@ void Compiler::PostLower(const std::string& tensor_id, // Set serialize matmul if (op_desc->HasAttr(sMatmulSerializeFactor)) { auto factor = - BOOST_GET_CONST(int, op_desc->GetAttr(sMatmulSerializeFactor)); + PADDLE_GET_CONST(int, op_desc->GetAttr(sMatmulSerializeFactor)); std::string mode = "output_channels"; if (op_desc->HasAttr(sMatmulSerializeMode)) { - mode = BOOST_GET_CONST(std::string, - op_desc->GetAttr(sMatmulSerializeMode)); + mode = 
PADDLE_GET_CONST(std::string, + op_desc->GetAttr(sMatmulSerializeMode)); } builder_->setSerializeMatMul({tensor_id}, mode, factor, true); } @@ -937,7 +938,7 @@ const std::vector& Compiler::GetOpOutputs(const OpDesc* op) { popart::DebugContext Compiler::BuildDebugContext(const OpDesc* op) { auto op_identify_id = - BOOST_GET_CONST(std::string, op->GetAttr(sOpIdentifyIdAttr)); + PADDLE_GET_CONST(std::string, op->GetAttr(sOpIdentifyIdAttr)); VLOG(10) << "op_identify_id of op: " << op->Type() << " is " << op_identify_id; return popart::DebugContext(op_identify_id); diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/activation_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/activation_ops.cc index 73e77f64200e4d..082bb17cac86b9 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/activation_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/activation_ops.cc @@ -121,8 +121,8 @@ Node *tanh_handler(Graph *graph, Node *node) { Node *brelu_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto t_min_ = BOOST_GET_CONST(float, op->GetAttr("t_min")); - auto t_max_ = BOOST_GET_CONST(float, op->GetAttr("t_max")); + auto t_min_ = PADDLE_GET_CONST(float, op->GetAttr("t_min")); + auto t_max_ = PADDLE_GET_CONST(float, op->GetAttr("t_max")); auto x = GetInputVarNode("X", node); auto cli_min = CreateConst( @@ -138,7 +138,7 @@ Node *brelu_handler(Graph *graph, Node *node) { Node *gelu_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto approximate_ = BOOST_GET_CONST(bool, op->GetAttr("approximate")); + auto approximate_ = PADDLE_GET_CONST(bool, op->GetAttr("approximate")); if (approximate_) { return activation_op_handler(graph, node, "popart_gelu_v2"); } else { @@ -189,14 +189,14 @@ Node *gelu_handler(Graph *graph, Node *node) { } Node *log_softmax_handler(Graph *graph, Node *node) { - auto axis = BOOST_GET_CONST(int, node->Op()->GetAttr("axis")); + auto axis = PADDLE_GET_CONST(int, node->Op()->GetAttr("axis")); auto new_softmax = CreateSoftmaxOpset11(graph, node, node->inputs, {}, axis); return CreateBaseOp( graph, node, "popart_log", new_softmax->outputs, node->outputs); } Node *elu_handler(Graph *graph, Node *node) { - auto alpha_ = BOOST_GET_CONST(float, node->Op()->GetAttr("alpha")); + auto alpha_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("alpha")); return CreateBaseOp(graph, node, "popart_elu", @@ -208,7 +208,7 @@ Node *elu_handler(Graph *graph, Node *node) { } Node *hard_shrink_handler(Graph *graph, Node *node) { - auto threshold_ = BOOST_GET_CONST(float, node->Op()->GetAttr("threshold")); + auto threshold_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("threshold")); return CreateBaseOp(graph, node, "popart_shrink", @@ -221,8 +221,8 @@ Node *hard_shrink_handler(Graph *graph, Node *node) { } Node *hard_sigmoid_handler(Graph *graph, Node *node) { - auto slope_ = BOOST_GET_CONST(float, node->Op()->GetAttr("slope")); - auto offset_ = BOOST_GET_CONST(float, node->Op()->GetAttr("offset")); + auto slope_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("slope")); + auto offset_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("offset")); return CreateBaseOp(graph, node, "popart_hardsigmoid", @@ -236,9 +236,9 @@ Node *hard_sigmoid_handler(Graph *graph, Node *node) { Node *hard_swish_handler(Graph *graph, Node *node) { auto x = GetInputVarNode("X", node); - auto scale_ = BOOST_GET_CONST(float, node->Op()->GetAttr("scale")); - auto offset_ = BOOST_GET_CONST(float, node->Op()->GetAttr("offset")); - auto threshold_ = 
BOOST_GET_CONST(float, node->Op()->GetAttr("threshold")); + auto scale_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("scale")); + auto offset_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("offset")); + auto threshold_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("threshold")); auto scale_node = CreateConst(graph, node, std::vector{scale_}, {1}, GetVarDType(x)) ->outputs.front(); @@ -269,7 +269,7 @@ Node *hard_swish_handler(Graph *graph, Node *node) { } Node *leaky_relu_handler(Graph *graph, Node *node) { - auto alpha_ = BOOST_GET_CONST(float, node->Op()->GetAttr("alpha")); + auto alpha_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("alpha")); return CreateBaseOp(graph, node, "popart_leakyrelu", @@ -321,7 +321,7 @@ Node *logsigmoid_handler(Graph *graph, Node *node) { } Node *mish_handler(Graph *graph, Node *node) { - auto threshold_ = BOOST_GET_CONST(float, node->Op()->GetAttr("threshold")); + auto threshold_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("threshold")); if (!is_float_equal(threshold_, 20.0f)) { PADDLE_THROW(platform::errors::Unimplemented( "For mish op, only support threshold = 20.0")); @@ -374,7 +374,7 @@ Node *prelu_handler(Graph *graph, Node *node) { } Node *relu6_handler(Graph *graph, Node *node) { - auto threshold_ = BOOST_GET_CONST(float, node->Op()->GetAttr("threshold")); + auto threshold_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("threshold")); auto cli_min = CreateConst( graph, node, std::vector{0.0}, {1}, ONNXDataType::FLOAT) @@ -398,8 +398,8 @@ Node *rsqrt_handler(Graph *graph, Node *node) { } Node *selu_handler(Graph *graph, Node *node) { - auto alpha_ = BOOST_GET_CONST(float, node->Op()->GetAttr("alpha")); - auto scale_ = BOOST_GET_CONST(float, node->Op()->GetAttr("scale")); + auto alpha_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("alpha")); + auto scale_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("scale")); return CreateBaseOp(graph, node, "popart_selu", @@ -419,7 +419,7 @@ Node *silu_handler(Graph *graph, Node *node) { } Node *softshrink_handler(Graph *graph, Node *node) { - auto lambda_ = BOOST_GET_CONST(float, node->Op()->GetAttr("lambda")); + auto lambda_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("lambda")); return CreateBaseOp(graph, node, "popart_shrink", @@ -439,7 +439,7 @@ Node *square_handler(Graph *graph, Node *node) { Node *swish_handler(Graph *graph, Node *node) { auto x = GetInputVarNode("X", node); auto out = GetOutputVarNode("Out", node); - auto beta_ = BOOST_GET_CONST(float, node->Op()->GetAttr("beta")); + auto beta_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("beta")); auto beta_node = CreateConst(graph, node, std::vector{beta_}, {1}, GetVarDType(x)) ->outputs.front(); @@ -459,7 +459,7 @@ Node *tanh_shrink_handler(Graph *graph, Node *node) { } Node *thresholded_relu_handler(Graph *graph, Node *node) { - auto threshold_ = BOOST_GET_CONST(float, node->Op()->GetAttr("threshold")); + auto threshold_ = PADDLE_GET_CONST(float, node->Op()->GetAttr("threshold")); auto x = GetInputVarNode("X", node); return CreateBaseOp(graph, node, diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/canonicalization_utils.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/canonicalization_utils.cc index c4960616b9db02..a738365681d04f 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/canonicalization_utils.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/canonicalization_utils.cc @@ -146,7 +146,7 @@ void MarkNodeForDeletion(Node *node) { node->Op()->SetAttr("delete_node", 1); } bool 
IsMarkedForDeletion(Node *node) { return node->Op()->HasAttr("delete_node") && - BOOST_GET_CONST(int, node->Op()->GetAttr("delete_node")) > 0; + PADDLE_GET_CONST(int, node->Op()->GetAttr("delete_node")) > 0; } int RemoveTailReduction(Graph *graph, diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/elementwise_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/elementwise_ops.cc index f01791f9d43c73..1ac7bdec42235d 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/elementwise_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/elementwise_ops.cc @@ -30,7 +30,7 @@ Node *elementwise_op_handler(Graph *graph, auto y_shape = GetInputVarNode("Y", node)->Var()->GetShape(); int64_t y_rank = y_shape.size(); - auto axis = BOOST_GET_CONST(int, op->GetAttr("axis")); + auto axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); if (axis == -1 || axis == x_rank - 1 || x_rank == y_rank) { auto new_node = CreateBaseOp(graph, diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/loss_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/loss_ops.cc index 438304fcfc7097..aa4c3638868d2b 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/loss_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/loss_ops.cc @@ -29,7 +29,7 @@ bool is_dynamic_graph() { Node *identity_loss_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto reduction = BOOST_GET_CONST(int, op->GetAttr("reduction")); + auto reduction = PADDLE_GET_CONST(int, op->GetAttr("reduction")); return CreateIdentityLossOp( graph, node, node->inputs, node->outputs, reduction); } @@ -181,7 +181,7 @@ Node *cross_entropy2_handler(Graph *graph, Node *node) { auto logits = GetInputVarNode("X", node); auto label = GetInputVarNode("Label", node); auto output = GetOutputVarNode("Y", node); - auto ignore_index = BOOST_GET_CONST(int, op->GetAttr("ignore_index")); + auto ignore_index = PADDLE_GET_CONST(int, op->GetAttr("ignore_index")); return cross_entropy_general_handler(graph, node, logits, @@ -199,9 +199,9 @@ Node *softmax_with_cross_entropy_handler(Graph *graph, Node *node) { auto logits = GetInputVarNode("Logits", node); auto label = GetInputVarNode("Label", node); auto output = GetOutputVarNode("Loss", node); - auto ignore_index = BOOST_GET_CONST(int, op->GetAttr("ignore_index")); - auto axis = BOOST_GET_CONST(int, op->GetAttr("axis")); - auto soft_label = BOOST_GET_CONST(bool, op->GetAttr("soft_label")); + auto ignore_index = PADDLE_GET_CONST(int, op->GetAttr("ignore_index")); + auto axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); + auto soft_label = PADDLE_GET_CONST(bool, op->GetAttr("soft_label")); logits = CreateSoftmaxOpset11( graph, node, {logits}, {GetOutputVarNode("Softmax", node)}, axis) @@ -220,7 +220,7 @@ Node *softmax_with_cross_entropy_handler(Graph *graph, Node *node) { Node *kldiv_loss_handler(Graph *graph, Node *node) { auto *op = node->Op(); auto reduction = ConvertToPopartReduction( - BOOST_GET_CONST(std::string, op->GetAttr("reduction"))); + PADDLE_GET_CONST(std::string, op->GetAttr("reduction"))); if (reduction == 2) { reduction = RemoveTailReduction(graph, node, "Loss"); } @@ -371,7 +371,7 @@ Node *huber_loss_handler(Graph *graph, Node *node) { ->outputs.front(); // const delta - auto delta_value = BOOST_GET_CONST(float, op->GetAttr("delta")); + auto delta_value = PADDLE_GET_CONST(float, op->GetAttr("delta")); auto delta = CreateConst( graph, node, std::vector{delta_value}, {1}, GetVarDType(x)) @@ 
-433,8 +433,8 @@ Node *warpctc_handler(Graph *graph, Node *node) { auto label = GetInputVarNode("Label", node); auto logits_length = GetInputVarNode("LogitsLength", node); auto label_length = GetInputVarNode("LabelLength", node); - auto blank = BOOST_GET_CONST(int, op->GetAttr("blank")); - auto norm_by_times = BOOST_GET_CONST(bool, op->GetAttr("norm_by_times")); + auto blank = PADDLE_GET_CONST(int, op->GetAttr("blank")); + auto norm_by_times = PADDLE_GET_CONST(bool, op->GetAttr("norm_by_times")); int reduction = 2; if (is_dynamic_graph()) { reduction = RemoveTailReduction(graph, node, "Loss"); diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/math_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/math_ops.cc index ddd7d9453cfa57..b144148c06ebfa 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/math_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/math_ops.cc @@ -43,7 +43,7 @@ Node *pow_handler(Graph *graph, Node *node) { node->outputs); } else { // Op(pow) -> Op(Constant)->Var(const_out)->Op(Pow) - auto value_ = BOOST_GET_CONST(float, op->GetAttr("factor")); + auto value_ = PADDLE_GET_CONST(float, op->GetAttr("factor")); auto new_node_const = CreateConst(graph, node, std::vector{value_}, @@ -61,8 +61,8 @@ Node *pow_handler(Graph *graph, Node *node) { Node *mul_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto x_num_col_dims = BOOST_GET_CONST(int, op->GetAttr("x_num_col_dims")); - auto y_num_col_dims = BOOST_GET_CONST(int, op->GetAttr("y_num_col_dims")); + auto x_num_col_dims = PADDLE_GET_CONST(int, op->GetAttr("x_num_col_dims")); + auto y_num_col_dims = PADDLE_GET_CONST(int, op->GetAttr("y_num_col_dims")); auto x_shape_ = GetInputVarNode("X", node)->Var()->GetShape(); auto y_shape_ = GetInputVarNode("Y", node)->Var()->GetShape(); @@ -111,9 +111,9 @@ Node *mul_handler(Graph *graph, Node *node) { Node *matmul_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto transpose_x = BOOST_GET_CONST(bool, op->GetAttr("transpose_X")); - auto transpose_y = BOOST_GET_CONST(bool, op->GetAttr("transpose_Y")); - auto alpha = BOOST_GET_CONST(float, op->GetAttr("alpha")); + auto transpose_x = PADDLE_GET_CONST(bool, op->GetAttr("transpose_X")); + auto transpose_y = PADDLE_GET_CONST(bool, op->GetAttr("transpose_Y")); + auto alpha = PADDLE_GET_CONST(float, op->GetAttr("alpha")); Node *x_node = GetInputVarNode("X", node); Node *y_node = GetInputVarNode("Y", node); int x_rank = x_node->Var()->GetShape().size(); @@ -201,16 +201,16 @@ Node *softmax_handler(Graph *graph, Node *node) { auto *op = node->Op(); int axis = -1; if (op->HasAttr("axis")) { - axis = BOOST_GET_CONST(int, op->GetAttr("axis")); + axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); } return CreateSoftmaxOpset11(graph, node, node->inputs, node->outputs, axis); } Node *scale_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto bias_ = BOOST_GET_CONST(float, op->GetAttr("bias")); + auto bias_ = PADDLE_GET_CONST(float, op->GetAttr("bias")); auto bias_after_scale_ = - BOOST_GET_CONST(bool, op->GetAttr("bias_after_scale")); + PADDLE_GET_CONST(bool, op->GetAttr("bias_after_scale")); auto data_type_ = GetInputVarNode("X", node)->Var()->GetDataType(); auto cast = @@ -244,7 +244,7 @@ Node *scale_handler(Graph *graph, Node *node) { } } } else { - auto scale_ = BOOST_GET_CONST(float, op->GetAttr("scale")); + auto scale_ = PADDLE_GET_CONST(float, op->GetAttr("scale")); if (is_float_equal(bias_, 0.0) && is_float_equal(scale_, 1.0)) { return 
CreateBaseOp(graph, node, @@ -331,11 +331,11 @@ Node *scale_handler(Graph *graph, Node *node) { Node *cumsum_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto exclusive = BOOST_GET_CONST(bool, op->GetAttr("exclusive")); + auto exclusive = PADDLE_GET_CONST(bool, op->GetAttr("exclusive")); int64_t popart_exclusive = 1 ? exclusive : 0; - auto reverse = BOOST_GET_CONST(bool, op->GetAttr("reverse")); + auto reverse = PADDLE_GET_CONST(bool, op->GetAttr("reverse")); int64_t popart_reverse = 1 ? reverse : 0; - auto axis = BOOST_GET_CONST(int, op->GetAttr("axis")); + auto axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); auto axis_node = CreateConst(graph, node, {}, @@ -374,8 +374,8 @@ Node *cumsum_handler(Graph *graph, Node *node) { Node *matmul_v2_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto transpose_x = BOOST_GET_CONST(bool, op->GetAttr("trans_x")); - auto transpose_y = BOOST_GET_CONST(bool, op->GetAttr("trans_y")); + auto transpose_x = PADDLE_GET_CONST(bool, op->GetAttr("trans_x")); + auto transpose_y = PADDLE_GET_CONST(bool, op->GetAttr("trans_y")); Node *x_node = GetInputVarNode("X", node); Node *y_node = GetInputVarNode("Y", node); int x_rank = x_node->Var()->GetShape().size(); @@ -433,7 +433,7 @@ Node *bmm_handler(Graph *graph, Node *node) { Node *arg_max_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axis = BOOST_GET_CONST(int64_t, op->GetAttr("axis")); + auto axis = PADDLE_GET_CONST(int64_t, op->GetAttr("axis")); return CreateBaseOp(graph, node, "popart_argmax", @@ -444,7 +444,7 @@ Node *arg_max_handler(Graph *graph, Node *node) { Node *arg_min_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axis = BOOST_GET_CONST(int64_t, op->GetAttr("axis")); + auto axis = PADDLE_GET_CONST(int64_t, op->GetAttr("axis")); return CreateBaseOp(graph, node, "popart_argmin", diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/nn_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/nn_ops.cc index 21c9beade3082e..e28531f349d143 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/nn_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/nn_ops.cc @@ -23,16 +23,17 @@ namespace { Node *conv2d_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto dilations_ = BOOST_GET_CONST(std::vector, op->GetAttr("dilations")); + auto dilations_ = + PADDLE_GET_CONST(std::vector, op->GetAttr("dilations")); auto dilations = std::vector{dilations_.begin(), dilations_.end()}; - auto group_ = BOOST_GET_CONST(int, op->GetAttr("groups")); - auto pads_ = BOOST_GET_CONST(std::vector, op->GetAttr("paddings")); + auto group_ = PADDLE_GET_CONST(int, op->GetAttr("groups")); + auto pads_ = PADDLE_GET_CONST(std::vector, op->GetAttr("paddings")); if (pads_.size() == 2) { pads_.push_back(pads_[0]); pads_.push_back(pads_[1]); } auto pads = std::vector{pads_.begin(), pads_.end()}; - auto stride_ = BOOST_GET_CONST(std::vector, op->GetAttr("strides")); + auto stride_ = PADDLE_GET_CONST(std::vector, op->GetAttr("strides")); auto stride = std::vector{stride_.begin(), stride_.end()}; if (!op->Input("Bias").empty()) { return CreateConv(graph, @@ -78,10 +79,10 @@ Node *batch_norm_handler(Graph *graph, Node *node) { bool is_test; if (is_test_type == 0) { // int - is_test = BOOST_GET_CONST(int, op->GetAttr("is_test")); + is_test = PADDLE_GET_CONST(int, op->GetAttr("is_test")); } else { // bool - is_test = BOOST_GET_CONST(bool, op->GetAttr("is_test")); + is_test = PADDLE_GET_CONST(bool, 
op->GetAttr("is_test")); } outputs.push_back(GetOutputVarNode("Y", node)); if (!is_test) { @@ -92,8 +93,8 @@ Node *batch_norm_handler(Graph *graph, Node *node) { num_outputs = 5; } // outputs.push_back(GetOutputVarNode("ReserveSpace", node)); - auto momentum = BOOST_GET_CONST(float, op->GetAttr("momentum")); - auto epsilon = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto momentum = PADDLE_GET_CONST(float, op->GetAttr("momentum")); + auto epsilon = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); // data_layout return CreateBaseOp(graph, node, @@ -109,12 +110,13 @@ Node *batch_norm_handler(Graph *graph, Node *node) { Node *pool2d_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto pooling_type = BOOST_GET_CONST(std::string, op->GetAttr("pooling_type")); - auto global_pooling = BOOST_GET_CONST(bool, op->GetAttr("global_pooling")); + auto pooling_type = + PADDLE_GET_CONST(std::string, op->GetAttr("pooling_type")); + auto global_pooling = PADDLE_GET_CONST(bool, op->GetAttr("global_pooling")); if (op->HasAttr("adaptive")) { - auto adaptive = BOOST_GET_CONST(bool, op->GetAttr("adaptive")); + auto adaptive = PADDLE_GET_CONST(bool, op->GetAttr("adaptive")); if (adaptive) { - auto ksize = BOOST_GET_CONST(std::vector, op->GetAttr("ksize")); + auto ksize = PADDLE_GET_CONST(std::vector, op->GetAttr("ksize")); if (ksize[0] != 1 || ksize[1] != 1) { PADDLE_THROW(platform::errors::InvalidArgument( "Only support pool_size=1 with adaptive mode.")); @@ -140,24 +142,24 @@ Node *pool2d_handler(Graph *graph, Node *node) { } if (op->HasAttr("padding_algorithm")) { auto padding_algorithm = - BOOST_GET_CONST(std::string, op->GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, op->GetAttr("padding_algorithm")); if (padding_algorithm != "EXPLICIT") { PADDLE_THROW(platform::errors::InvalidArgument( "op pool2d with unkonwn padding_algorithm: %s", padding_algorithm)); } } - auto ksize = BOOST_GET_CONST(std::vector, op->GetAttr("ksize")); + auto ksize = PADDLE_GET_CONST(std::vector, op->GetAttr("ksize")); auto kernel_shape = std::vector{ksize.begin(), ksize.end()}; - auto ceil_mode_ = BOOST_GET_CONST(bool, op->GetAttr("ceil_mode")); + auto ceil_mode_ = PADDLE_GET_CONST(bool, op->GetAttr("ceil_mode")); auto ceil_mode = int64_t(ceil_mode_ ? 
1 : 0); - auto paddings = BOOST_GET_CONST(std::vector, op->GetAttr("paddings")); + auto paddings = PADDLE_GET_CONST(std::vector, op->GetAttr("paddings")); auto pads = std::vector{paddings.begin(), paddings.end()}; if (pads.size() == 2) { pads.push_back(paddings[0]); pads.push_back(paddings[1]); } - auto strides_ = BOOST_GET_CONST(std::vector, op->GetAttr("strides")); + auto strides_ = PADDLE_GET_CONST(std::vector, op->GetAttr("strides")); auto strides = std::vector{strides_.begin(), strides_.end()}; if (pooling_type == "max") { int64_t num_outputs = 1; @@ -199,7 +201,7 @@ Node *pool2d_handler(Graph *graph, Node *node) { Node *max_pool2d_with_index_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto ksize = BOOST_GET_CONST(std::vector, op->GetAttr("ksize")); + auto ksize = PADDLE_GET_CONST(std::vector, op->GetAttr("ksize")); if (ksize[0] != 1 || ksize[1] != 1) { PADDLE_THROW(platform::errors::InvalidArgument( "Only support pool_size=1 with adaptive mode.")); @@ -213,8 +215,8 @@ Node *max_pool2d_with_index_handler(Graph *graph, Node *node) { Node *group_norm_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto epsilon_ = BOOST_GET_CONST(float, op->GetAttr("epsilon")); - auto groups_ = BOOST_GET_CONST(int, op->GetAttr("groups")); + auto epsilon_ = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); + auto groups_ = PADDLE_GET_CONST(int, op->GetAttr("groups")); auto groups = int64_t{groups_}; auto attrs_ = AttributeMap{{"epsilon", epsilon_}, {"num_groups", groups}}; @@ -230,7 +232,7 @@ Node *group_norm_handler(Graph *graph, Node *node) { Node *instance_norm_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto epsilon_ = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto epsilon_ = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); auto attrs_ = AttributeMap{{"epsilon", epsilon_}}; std::vector inputs_ = {GetInputVarNode("X", node), @@ -243,9 +245,9 @@ Node *instance_norm_handler(Graph *graph, Node *node) { Node *layer_norm_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto begin_norm_axis_ = BOOST_GET_CONST(int, op->GetAttr("begin_norm_axis")); + auto begin_norm_axis_ = PADDLE_GET_CONST(int, op->GetAttr("begin_norm_axis")); auto input_shape_ = GetInputVarNode("X", node)->Var()->GetShape(); - auto epsilon_ = BOOST_GET_CONST(float, op->GetAttr("epsilon")); + auto epsilon_ = PADDLE_GET_CONST(float, op->GetAttr("epsilon")); int64_t groups_ = 1; auto groupnorm_attrs_ = @@ -316,17 +318,17 @@ Node *layer_norm_handler(Graph *graph, Node *node) { Node *dropout_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto dropout_prob_ = BOOST_GET_CONST(float, op->GetAttr("dropout_prob")); + auto dropout_prob_ = PADDLE_GET_CONST(float, op->GetAttr("dropout_prob")); auto dropout_implementation_ = - BOOST_GET_CONST(std::string, op->GetAttr("dropout_implementation")); + PADDLE_GET_CONST(std::string, op->GetAttr("dropout_implementation")); auto is_test_type_ = op->GetAttrType("is_test"); bool is_test_; if (is_test_type_ == 0) { // int - is_test_ = BOOST_GET_CONST(int, op->GetAttr("is_test")); + is_test_ = PADDLE_GET_CONST(int, op->GetAttr("is_test")); } else { // bool - is_test_ = BOOST_GET_CONST(bool, op->GetAttr("is_test")); + is_test_ = PADDLE_GET_CONST(bool, op->GetAttr("is_test")); } if (is_test_) { @@ -379,7 +381,7 @@ Node *dropout_handler(Graph *graph, Node *node) { Node *conv2d_transpose_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto data_format = BOOST_GET_CONST(std::string, op->GetAttr("data_format")); + auto data_format = 
PADDLE_GET_CONST(std::string, op->GetAttr("data_format")); if (data_format != "NCHW") { PADDLE_THROW( platform::errors::InvalidArgument("Only support NCHW as data_format.")); @@ -388,21 +390,22 @@ Node *conv2d_transpose_handler(Graph *graph, Node *node) { auto *kernel_info = GetInputVarNode("Filter", node); auto kernel_shape = kernel_info->Var()->GetShape(); - auto dilations_ = BOOST_GET_CONST(std::vector, op->GetAttr("dilations")); + auto dilations_ = + PADDLE_GET_CONST(std::vector, op->GetAttr("dilations")); auto dilations = std::vector{dilations_.begin(), dilations_.end()}; - auto strides_ = BOOST_GET_CONST(std::vector, op->GetAttr("strides")); + auto strides_ = PADDLE_GET_CONST(std::vector, op->GetAttr("strides")); auto strides = std::vector{strides_.begin(), strides_.end()}; auto output_padding_ = - BOOST_GET_CONST(std::vector, op->GetAttr("output_padding")); + PADDLE_GET_CONST(std::vector, op->GetAttr("output_padding")); auto output_padding = std::vector{output_padding_.begin(), output_padding_.end()}; - auto group_ = BOOST_GET_CONST(int, op->GetAttr("groups")); + auto group_ = PADDLE_GET_CONST(int, op->GetAttr("groups")); auto group = int64_t(group_); auto padding_algorithm = - BOOST_GET_CONST(std::string, op->GetAttr("padding_algorithm")); + PADDLE_GET_CONST(std::string, op->GetAttr("padding_algorithm")); - auto paddings_ = BOOST_GET_CONST(std::vector, op->GetAttr("paddings")); + auto paddings_ = PADDLE_GET_CONST(std::vector, op->GetAttr("paddings")); if (paddings_.size() == 2) { paddings_.push_back(paddings_[0]); paddings_.push_back(paddings_[1]); @@ -466,7 +469,7 @@ Node *conv2d_transpose_handler(Graph *graph, Node *node) { Node *affine_channel_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto data_layout = BOOST_GET_CONST(std::string, op->GetAttr("data_layout")); + auto data_layout = PADDLE_GET_CONST(std::string, op->GetAttr("data_layout")); if (data_layout != "NCHW") { PADDLE_THROW( platform::errors::InvalidArgument("Only support NCHW as data_format.")); @@ -509,14 +512,14 @@ Node *affine_channel_handler(Graph *graph, Node *node) { Node *interp_handler(Graph *graph, Node *node, const std::string &mode) { auto *op = node->Op(); - auto data_layout = BOOST_GET_CONST(std::string, op->GetAttr("data_layout")); + auto data_layout = PADDLE_GET_CONST(std::string, op->GetAttr("data_layout")); if (data_layout != "NCHW") { PADDLE_THROW( platform::errors::InvalidArgument("Only support NCHW as data_format.")); } - auto align_corners = BOOST_GET_CONST(bool, op->GetAttr("align_corners")); - auto align_mode = BOOST_GET_CONST(int, op->GetAttr("align_mode")); + auto align_corners = PADDLE_GET_CONST(bool, op->GetAttr("align_corners")); + auto align_mode = PADDLE_GET_CONST(int, op->GetAttr("align_mode")); auto paddle_target_dtype = VarType::FP32; auto onnx_target_dtype = ONNXDataType::FLOAT; @@ -594,9 +597,9 @@ Node *interp_handler(Graph *graph, Node *node, const std::string &mode) { ->outputs[0]; } else { // Get 'size' or 'scale' from attribute - auto out_d = BOOST_GET_CONST(int, op->GetAttr("out_d")); - auto out_h = BOOST_GET_CONST(int, op->GetAttr("out_h")); - auto out_w = BOOST_GET_CONST(int, op->GetAttr("out_w")); + auto out_d = PADDLE_GET_CONST(int, op->GetAttr("out_d")); + auto out_h = PADDLE_GET_CONST(int, op->GetAttr("out_h")); + auto out_w = PADDLE_GET_CONST(int, op->GetAttr("out_w")); if (out_d > 0 || out_w > 0 || out_h > 0) { std::vector out_size; if (GetInputVarNode("X", node)->Var()->GetShape().size() == 5) { @@ -617,7 +620,7 @@ Node *interp_handler(Graph *graph, Node 
*node, const std::string &mode) { ->outputs[0]; } else { auto scale_value = - BOOST_GET_CONST(std::vector, op->GetAttr("scale")); + PADDLE_GET_CONST(std::vector, op->GetAttr("scale")); float padding = 1.0; scale_value.insert(scale_value.begin(), padding); scale_value.insert(scale_value.begin(), padding); @@ -731,7 +734,7 @@ Node *data_norm_handler(Graph *graph, Node *node) { int slot_dim = -1; if (op->HasAttr("slot_dim")) { - slot_dim = BOOST_GET_CONST(int, op->GetAttr("slot_dim")); + slot_dim = PADDLE_GET_CONST(int, op->GetAttr("slot_dim")); } if (slot_dim > 0) { @@ -742,7 +745,7 @@ Node *data_norm_handler(Graph *graph, Node *node) { bool enable_scale_and_shift = false; if (op->HasAttr("enable_scale_and_shift")) { enable_scale_and_shift = - BOOST_GET_CONST(bool, op->GetAttr("enable_scale_and_shift")); + PADDLE_GET_CONST(bool, op->GetAttr("enable_scale_and_shift")); } auto *mean_arr = CreateBaseOp(graph, @@ -789,9 +792,9 @@ Node *data_norm_handler(Graph *graph, Node *node) { Node *pad_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto mode = BOOST_GET_CONST(std::string, op->GetAttr("mode")); - auto value = BOOST_GET_CONST(float, op->GetAttr("value")); - auto data_format = BOOST_GET_CONST(std::string, op->GetAttr("data_format")); + auto mode = PADDLE_GET_CONST(std::string, op->GetAttr("mode")); + auto value = PADDLE_GET_CONST(float, op->GetAttr("value")); + auto data_format = PADDLE_GET_CONST(std::string, op->GetAttr("data_format")); if (data_format == "NDHWC") { PADDLE_THROW( @@ -808,7 +811,7 @@ Node *pad_handler(Graph *graph, Node *node) { "Do not support Paddings as a inputs tensor")); } // Paddings -> Attr - auto paddings = BOOST_GET_CONST(std::vector, op->GetAttr("paddings")); + auto paddings = PADDLE_GET_CONST(std::vector, op->GetAttr("paddings")); std::vector new_paddings(10, 0); new_paddings[2] = paddings[4]; new_paddings[3] = paddings[2]; diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/other_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/other_ops.cc index 0b95f641695c1a..1cbe9eb4663824 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/other_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/other_ops.cc @@ -32,12 +32,12 @@ Node *custom_op_handler(Graph *graph, Node *node) { Node *print_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto print_phase = BOOST_GET_CONST(std::string, op->GetAttr("print_phase")); + auto print_phase = PADDLE_GET_CONST(std::string, op->GetAttr("print_phase")); int64_t print_gradient = 0; if (print_phase != "forward") { print_gradient = 1; } - auto title = BOOST_GET_CONST(std::string, op->GetAttr("message")); + auto title = PADDLE_GET_CONST(std::string, op->GetAttr("message")); if (title.empty()) { title = GetInputVarNode("In", node)->Var()->Name(); } @@ -56,10 +56,10 @@ Node *checkpointoutput_handler(Graph *graph, Node *node) { Node *custom_nll_loss_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto reduction = BOOST_GET_CONST(int, op->GetAttr("reduction")); - auto ignoreIndex = BOOST_GET_CONST(std::string, op->GetAttr("ignoreIndex")); + auto reduction = PADDLE_GET_CONST(int, op->GetAttr("reduction")); + auto ignoreIndex = PADDLE_GET_CONST(std::string, op->GetAttr("ignoreIndex")); auto inputIsLogProbability = - BOOST_GET_CONST(bool, op->GetAttr("inputIsLogProbability")); + PADDLE_GET_CONST(bool, op->GetAttr("inputIsLogProbability")); if (ignoreIndex == "None") { return CreateBaseOp(graph, node, diff --git 
a/paddle/fluid/platform/device/ipu/popart_canonicalization/reduce_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/reduce_ops.cc index e1cc2de8bc547a..d44085b5d81228 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/reduce_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/reduce_ops.cc @@ -24,13 +24,13 @@ namespace { Node *reduce_op_handler(Graph *graph, Node *node, const std::string &op_name) { auto *op = node->Op(); auto attrs = AttributeMap{}; - auto reduce_all = BOOST_GET_CONST(bool, op->GetAttr("reduce_all")); + auto reduce_all = PADDLE_GET_CONST(bool, op->GetAttr("reduce_all")); if (!reduce_all) { - auto axes_ = BOOST_GET_CONST(std::vector, op->GetAttr("dim")); + auto axes_ = PADDLE_GET_CONST(std::vector, op->GetAttr("dim")); auto axes = std::vector{axes_.begin(), axes_.end()}; attrs.emplace("axes", axes); } - auto keepdims_ = BOOST_GET_CONST(bool, op->GetAttr("keep_dim")); + auto keepdims_ = PADDLE_GET_CONST(bool, op->GetAttr("keep_dim")); auto keepdims = int64_t{keepdims_}; attrs.emplace("keepdims", keepdims); return CreateBaseOp(graph, node, op_name, node->inputs, node->outputs, attrs); @@ -41,13 +41,13 @@ Node *reduce_all_op_handler(Graph *graph, const std::string &op_name) { auto *op = node->Op(); auto attrs = AttributeMap{}; - auto reduce_all = BOOST_GET_CONST(bool, op->GetAttr("reduce_all")); + auto reduce_all = PADDLE_GET_CONST(bool, op->GetAttr("reduce_all")); if (!reduce_all) { - auto axes_ = BOOST_GET_CONST(std::vector, op->GetAttr("dim")); + auto axes_ = PADDLE_GET_CONST(std::vector, op->GetAttr("dim")); auto axes = std::vector{axes_.begin(), axes_.end()}; attrs.emplace("axes", axes); } - auto keepdims_ = BOOST_GET_CONST(bool, op->GetAttr("keep_dim")); + auto keepdims_ = PADDLE_GET_CONST(bool, op->GetAttr("keep_dim")); auto keepdims = int64_t{keepdims_}; attrs.emplace("keepdims", keepdims); auto int32_x = @@ -80,13 +80,13 @@ Node *reduce_prod_handler(Graph *graph, Node *node) { Node *logsumexp_handler(Graph *graph, Node *node) { auto *op = node->Op(); auto attrs = AttributeMap{}; - auto reduce_all = BOOST_GET_CONST(bool, op->GetAttr("reduce_all")); + auto reduce_all = PADDLE_GET_CONST(bool, op->GetAttr("reduce_all")); if (!reduce_all) { - auto axes_ = BOOST_GET_CONST(std::vector, op->GetAttr("axis")); + auto axes_ = PADDLE_GET_CONST(std::vector, op->GetAttr("axis")); auto axes = std::vector{axes_.begin(), axes_.end()}; attrs.emplace("axes", axes); } - auto keepdims_ = BOOST_GET_CONST(bool, op->GetAttr("keepdim")); + auto keepdims_ = PADDLE_GET_CONST(bool, op->GetAttr("keepdim")); auto keepdims = int64_t{keepdims_}; attrs.emplace("keepdims", keepdims); return CreateBaseOp(graph, diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/search_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/search_ops.cc index b2d91ba52fd4dc..c80f9c9f995da5 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/search_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/search_ops.cc @@ -27,7 +27,7 @@ Node *topk_handler(Graph *graph, Node *node) { int axis_ = -1; if (op->HasAttr("axis")) { - axis_ = BOOST_GET_CONST(int, op->GetAttr("axis")); + axis_ = PADDLE_GET_CONST(int, op->GetAttr("axis")); } if (axis_ == -1) { auto shape = GetInputVarNode("X", node)->Var()->GetShape(); @@ -43,7 +43,7 @@ Node *topk_handler(Graph *graph, Node *node) { bool largest = true; if (op->HasAttr("largest")) { - largest = BOOST_GET_CONST(bool, op->GetAttr("largest")); + largest = 
PADDLE_GET_CONST(bool, op->GetAttr("largest")); } if (largest) { // defaults to 1, largest values @@ -54,7 +54,7 @@ Node *topk_handler(Graph *graph, Node *node) { bool sorted = true; if (op->HasAttr("sorted")) { - sorted = BOOST_GET_CONST(bool, op->GetAttr("sorted")); + sorted = PADDLE_GET_CONST(bool, op->GetAttr("sorted")); } if (sorted) { // defaults to 1, sorted results @@ -68,7 +68,7 @@ Node *topk_handler(Graph *graph, Node *node) { if (!op->Input("K").empty()) { var_k = GetInputVarNode("K", node); } else { - auto k = BOOST_GET_CONST(int, op->GetAttr("k")); + auto k = PADDLE_GET_CONST(int, op->GetAttr("k")); auto *op_k = CreateConst(graph, node, {}, @@ -98,8 +98,8 @@ Node *topk_handler(Graph *graph, Node *node) { Node *argsort_handler(Graph *graph, Node *node) { auto *op = node->Op(); auto x_shape = GetInputVarNode("X", node)->Var()->GetShape(); - auto axis_ = BOOST_GET_CONST(int, op->GetAttr("axis")); - auto descending_ = BOOST_GET_CONST(bool, op->GetAttr("descending")); + auto axis_ = PADDLE_GET_CONST(int, op->GetAttr("axis")); + auto descending_ = PADDLE_GET_CONST(bool, op->GetAttr("descending")); if (axis_ < 0) { axis_ = axis_ + x_shape.size(); } diff --git a/paddle/fluid/platform/device/ipu/popart_canonicalization/tensor_ops.cc b/paddle/fluid/platform/device/ipu/popart_canonicalization/tensor_ops.cc index 0bf0335db0f34e..95d273bb66f01c 100644 --- a/paddle/fluid/platform/device/ipu/popart_canonicalization/tensor_ops.cc +++ b/paddle/fluid/platform/device/ipu/popart_canonicalization/tensor_ops.cc @@ -29,10 +29,10 @@ Node *fill_constant_handler(Graph *graph, Node *node) { PADDLE_THROW( platform::errors::Unimplemented("op fill_constant with ShapeTensor")); } - auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype")); + auto dtype_ = PADDLE_GET_CONST(int, op->GetAttr("dtype")); auto dtype = VarType2OnnxDType(static_cast(dtype_)); - auto dims = BOOST_GET_CONST(std::vector, op->GetAttr("shape")); - auto value_ = BOOST_GET_CONST(float, op->GetAttr("value")); + auto dims = PADDLE_GET_CONST(std::vector, op->GetAttr("shape")); + auto value_ = PADDLE_GET_CONST(float, op->GetAttr("value")); int size = 1; for (auto &dim : dims) { size *= dim; @@ -77,13 +77,13 @@ Node *fill_constant_handler(Graph *graph, Node *node) { Node *gaussian_random_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto shape = BOOST_GET_CONST(std::vector, op->GetAttr("shape")); - auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype")); + auto shape = PADDLE_GET_CONST(std::vector, op->GetAttr("shape")); + auto dtype_ = PADDLE_GET_CONST(int, op->GetAttr("dtype")); auto dtype = VarType2OnnxDType(static_cast(dtype_)); - auto mean = BOOST_GET_CONST(float, op->GetAttr("mean")); - auto scale = BOOST_GET_CONST(float, op->GetAttr("std")); + auto mean = PADDLE_GET_CONST(float, op->GetAttr("mean")); + auto scale = PADDLE_GET_CONST(float, op->GetAttr("std")); // seed not work - auto seed_ = BOOST_GET_CONST(int, op->GetAttr("seed")); + auto seed_ = PADDLE_GET_CONST(int, op->GetAttr("seed")); auto seed = static_cast(seed_); return CreateBaseOp(graph, node, @@ -101,13 +101,13 @@ Node *gaussian_random_handler(Graph *graph, Node *node) { Node *uniform_random_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto shape = BOOST_GET_CONST(std::vector, op->GetAttr("shape")); - auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype")); + auto shape = PADDLE_GET_CONST(std::vector, op->GetAttr("shape")); + auto dtype_ = PADDLE_GET_CONST(int, op->GetAttr("dtype")); auto dtype = VarType2OnnxDType(static_cast(dtype_)); - auto 
high = BOOST_GET_CONST(float, op->GetAttr("max")); - auto low = BOOST_GET_CONST(float, op->GetAttr("min")); + auto high = PADDLE_GET_CONST(float, op->GetAttr("max")); + auto low = PADDLE_GET_CONST(float, op->GetAttr("min")); // seed not work - auto seed_ = BOOST_GET_CONST(int, op->GetAttr("seed")); + auto seed_ = PADDLE_GET_CONST(int, op->GetAttr("seed")); auto seed = static_cast(seed_); return CreateBaseOp(graph, node, @@ -126,7 +126,7 @@ Node *uniform_random_handler(Graph *graph, Node *node) { Node *transpose_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axis_ = BOOST_GET_CONST(std::vector, op->GetAttr("axis")); + auto axis_ = PADDLE_GET_CONST(std::vector, op->GetAttr("axis")); std::vector perm(axis_.begin(), axis_.end()); auto attrs = AttributeMap{{"perm", perm}}; @@ -141,7 +141,7 @@ Node *transpose_handler(Graph *graph, Node *node) { Node *reshape_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto shape_ = BOOST_GET_CONST(std::vector, op->GetAttr("shape")); + auto shape_ = PADDLE_GET_CONST(std::vector, op->GetAttr("shape")); std::vector shape(shape_.begin(), shape_.end()); auto attrs = AttributeMap{ {"value", shape}, @@ -162,7 +162,7 @@ Node *reshape_handler(Graph *graph, Node *node) { Node *flatten2_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axis = BOOST_GET_CONST(int, op->GetAttr("axis")); + auto axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); return CreateBaseOp(graph, node, "popart_flatten", @@ -184,7 +184,7 @@ Node *gather_handler(Graph *graph, Node *node) { Node *squeeze_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axes_ = BOOST_GET_CONST(std::vector, op->GetAttr("axes")); + auto axes_ = PADDLE_GET_CONST(std::vector, op->GetAttr("axes")); auto input_shape_ = GetInputVarNode("X", node)->Var()->GetShape(); std::vector axes{axes_.begin(), axes_.end()}; @@ -207,7 +207,7 @@ Node *squeeze_handler(Graph *graph, Node *node) { Node *cast_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto otype = BOOST_GET_CONST(int, op->GetAttr("out_dtype")); + auto otype = PADDLE_GET_CONST(int, op->GetAttr("out_dtype")); auto new_node = CreateCast(graph, node, node->inputs, @@ -233,7 +233,7 @@ Node *lookup_table_op_handler(Graph *graph, Node *node, const std::string &type) { auto *op = node->Op(); - auto padding_idx_ = BOOST_GET_CONST(int64_t, op->GetAttr("padding_idx")); + auto padding_idx_ = PADDLE_GET_CONST(int64_t, op->GetAttr("padding_idx")); auto w_shape_ = GetInputVarNode("W", node)->Var()->GetShape(); auto table_size_ = w_shape_[0]; auto emb_size_ = w_shape_[1]; @@ -382,7 +382,7 @@ Node *lookup_table_v2_handler(Graph *graph, Node *node) { Node *unsqueeze_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axes_ = BOOST_GET_CONST(std::vector, op->GetAttr("axes")); + auto axes_ = PADDLE_GET_CONST(std::vector, op->GetAttr("axes")); std::vector axes{axes_.begin(), axes_.end()}; auto new_node_unsqueeze = CreateBaseOp(graph, node, @@ -396,7 +396,7 @@ Node *unsqueeze_handler(Graph *graph, Node *node) { Node *concat_handler(Graph *graph, Node *node) { auto *op = node->Op(); - int64_t axis_{BOOST_GET_CONST(int, op->GetAttr("axis"))}; + int64_t axis_{PADDLE_GET_CONST(int, op->GetAttr("axis"))}; auto new_node_concat = CreateBaseOp(graph, node, @@ -409,7 +409,7 @@ Node *concat_handler(Graph *graph, Node *node) { Node *stack_handler(Graph *graph, Node *node) { auto *op = node->Op(); - int64_t axis_{BOOST_GET_CONST(int, op->GetAttr("axis"))}; + int64_t axis_{PADDLE_GET_CONST(int, 
op->GetAttr("axis"))}; std::vector axes_{axis_}; std::vector unsqueeze_outputs_{}; @@ -445,7 +445,7 @@ Node *slice_handler(Graph *graph, Node *node) { if (!op->HasAttr("starts")) { starts = GetInputVarNode("StartsTensor", node); } else { - auto starts_ = BOOST_GET_CONST(std::vector, op->GetAttr("starts")); + auto starts_ = PADDLE_GET_CONST(std::vector, op->GetAttr("starts")); auto dim = int64_t(starts_.size()); starts = CreateConst( graph, node, std::vector{starts_}, {dim}, ONNXDataType::INT32); @@ -455,7 +455,7 @@ Node *slice_handler(Graph *graph, Node *node) { if (!op->HasAttr("ends")) { ends = GetInputVarNode("EndsTensor", node); } else { - auto ends_ = BOOST_GET_CONST(std::vector, op->GetAttr("ends")); + auto ends_ = PADDLE_GET_CONST(std::vector, op->GetAttr("ends")); auto dim = int64_t(ends_.size()); ends = CreateConst( graph, node, std::vector{ends_}, {dim}, ONNXDataType::INT32); @@ -463,14 +463,14 @@ Node *slice_handler(Graph *graph, Node *node) { } Node *axes = nullptr; { - auto axes_ = BOOST_GET_CONST(std::vector, op->GetAttr("axes")); + auto axes_ = PADDLE_GET_CONST(std::vector, op->GetAttr("axes")); auto dim = int64_t(axes_.size()); axes = CreateConst( graph, node, std::vector{axes_}, {dim}, ONNXDataType::INT32); } auto decrease_axis_ = - BOOST_GET_CONST(std::vector, op->GetAttr("decrease_axis")); + PADDLE_GET_CONST(std::vector, op->GetAttr("decrease_axis")); auto input_shape_ = GetInputVarNode("Input", node)->Var()->GetShape(); auto output_shape_ = GetOutputVarNode("Out", node)->Var()->GetShape(); if (decrease_axis_.size() == 0) { @@ -524,7 +524,7 @@ Node *expand_handler(Graph *graph, Node *node) { VarType::INT64); } else { auto expand_times_i32 = - BOOST_GET_CONST(std::vector, op->GetAttr("expand_times")); + PADDLE_GET_CONST(std::vector, op->GetAttr("expand_times")); auto expand_times_ = std::vector{expand_times_i32.begin(), expand_times_i32.end()}; auto dim = int64_t(expand_times_.size()); @@ -554,31 +554,32 @@ Node *assign_handler(Graph *graph, Node *node) { Node *assign_value_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype")); + auto dtype_ = PADDLE_GET_CONST(int, op->GetAttr("dtype")); auto dtype = VarType2OnnxDType(static_cast(dtype_)); - auto dims_ = BOOST_GET_CONST(std::vector, op->GetAttr("shape")); + auto dims_ = PADDLE_GET_CONST(std::vector, op->GetAttr("shape")); std::vector dims(dims_.begin(), dims_.end()); Attribute values; std::string value_name; switch (dtype_) { case VarType::BOOL: { value_name = "bool_values"; - auto vec_int = BOOST_GET_CONST(std::vector, op->GetAttr(value_name)); + auto vec_int = + PADDLE_GET_CONST(std::vector, op->GetAttr(value_name)); std::vector vec_bool(vec_int.begin(), vec_int.end()); values = vec_bool; } break; case VarType::INT32: value_name = "int32_values"; - values = BOOST_GET_CONST(std::vector, op->GetAttr(value_name)); + values = PADDLE_GET_CONST(std::vector, op->GetAttr(value_name)); break; case VarType::FP16: case VarType::FP32: value_name = "fp32_values"; - values = BOOST_GET_CONST(std::vector, op->GetAttr(value_name)); + values = PADDLE_GET_CONST(std::vector, op->GetAttr(value_name)); break; case VarType::INT64: value_name = "int64_values"; - values = BOOST_GET_CONST(std::vector, op->GetAttr(value_name)); + values = PADDLE_GET_CONST(std::vector, op->GetAttr(value_name)); break; default: PADDLE_THROW(platform::errors::Unimplemented( @@ -599,9 +600,9 @@ Node *assign_value_handler(Graph *graph, Node *node) { Node *fill_any_like_handler(Graph *graph, Node *node) { 
auto *op = node->Op(); - auto value = BOOST_GET_CONST(float, op->GetAttr("value")); + auto value = PADDLE_GET_CONST(float, op->GetAttr("value")); auto x_shape = GetInputVarNode("X", node)->Var()->GetShape(); - auto dtype_ = BOOST_GET_CONST(int, op->GetAttr("dtype")); + auto dtype_ = PADDLE_GET_CONST(int, op->GetAttr("dtype")); auto dtype = static_cast(dtype_); int size = 1; for (auto &dim : x_shape) { @@ -648,9 +649,9 @@ Node *fill_any_like_handler(Graph *graph, Node *node) { Node *one_hot_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto depth = BOOST_GET_CONST(int, op->GetAttr("depth")); + auto depth = PADDLE_GET_CONST(int, op->GetAttr("depth")); auto allow_out_of_range = - BOOST_GET_CONST(bool, op->GetAttr("allow_out_of_range")); + PADDLE_GET_CONST(bool, op->GetAttr("allow_out_of_range")); if (allow_out_of_range) { PADDLE_THROW(platform::errors::Unimplemented( "Do not support allow_out_of_range=True")); @@ -682,9 +683,9 @@ Node *one_hot_handler(Graph *graph, Node *node) { Node *one_hot_v2_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto depth = BOOST_GET_CONST(int, op->GetAttr("depth")); + auto depth = PADDLE_GET_CONST(int, op->GetAttr("depth")); auto allow_out_of_range = - BOOST_GET_CONST(bool, op->GetAttr("allow_out_of_range")); + PADDLE_GET_CONST(bool, op->GetAttr("allow_out_of_range")); if (allow_out_of_range) { PADDLE_THROW(platform::errors::Unimplemented( "Do not support allow_out_of_range=True")); @@ -728,8 +729,8 @@ Node *one_hot_v2_handler(Graph *graph, Node *node) { Node *split_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axis = BOOST_GET_CONST(int, op->GetAttr("axis")); - auto sections = BOOST_GET_CONST(std::vector, op->GetAttr("sections")); + auto axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); + auto sections = PADDLE_GET_CONST(std::vector, op->GetAttr("sections")); return CreateBaseOp( graph, node, @@ -762,8 +763,8 @@ Node *clip_handler(Graph *graph, Node *node) { auto *op = node->Op(); // if (min_value == -FLT_MAX) then means no min_value // if (max_value == FLT_MAX) then means no max_value - auto min_value = BOOST_GET_CONST(float, op->GetAttr("min")); - auto max_value = BOOST_GET_CONST(float, op->GetAttr("max")); + auto min_value = PADDLE_GET_CONST(float, op->GetAttr("min")); + auto max_value = PADDLE_GET_CONST(float, op->GetAttr("max")); bool has_min_tensor = false; bool has_max_tensor = false; @@ -863,7 +864,7 @@ Node *dist_handler(Graph *graph, Node *node) { auto *abs_node = CreateBaseOp(graph, node, "popart_abs", {sub_node}, {})->outputs[0]; - auto p = BOOST_GET_CONST(float, op->GetAttr("p")); + auto p = PADDLE_GET_CONST(float, op->GetAttr("p")); // Reshape to 1-D output auto target_shape = AttributeMap{{"value", std::vector{-1}}, @@ -963,7 +964,7 @@ Node *expand_as_v2_handler(Graph *graph, Node *node) { } auto input_shape = GetInputVarNode("X", node)->Var()->GetShape(); auto shape_value = - BOOST_GET_CONST(std::vector, op->GetAttr("target_shape")); + PADDLE_GET_CONST(std::vector, op->GetAttr("target_shape")); // Check the dimensions int input_shape_index = input_shape.size() - 1; int target_shape_index = shape_value.size() - 1; @@ -1010,7 +1011,7 @@ Node *expand_v2_handler(Graph *graph, Node *node) { "attribute `shape`.")); } auto input_shape = GetInputVarNode("X", node)->Var()->GetShape(); - auto shape_value = BOOST_GET_CONST(std::vector, op->GetAttr("shape")); + auto shape_value = PADDLE_GET_CONST(std::vector, op->GetAttr("shape")); // Check the dimensions int input_shape_index = input_shape.size() - 1; int 
target_shape_index = shape_value.size() - 1; @@ -1047,8 +1048,8 @@ Node *expand_v2_handler(Graph *graph, Node *node) { Node *flatten_contiguous_range_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto start_axis = BOOST_GET_CONST(int, op->GetAttr("start_axis")); - auto stop_axis = BOOST_GET_CONST(int, op->GetAttr("stop_axis")); + auto start_axis = PADDLE_GET_CONST(int, op->GetAttr("start_axis")); + auto stop_axis = PADDLE_GET_CONST(int, op->GetAttr("stop_axis")); auto input_rank = GetInputVarNode("X", node)->Var()->GetShape().size(); if (start_axis < 0) { @@ -1097,7 +1098,7 @@ Node *flatten_contiguous_range_handler(Graph *graph, Node *node) { Node *flip_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto axes = BOOST_GET_CONST(std::vector, op->GetAttr("axis")); + auto axes = PADDLE_GET_CONST(std::vector, op->GetAttr("axis")); auto input_shape = GetInputVarNode("X", node)->Var()->GetShape(); for (auto it = axes.begin(); it != axes.end();) { if (*it < 0) { @@ -1200,9 +1201,9 @@ Node *meshgrid_handler(Graph *graph, Node *node) { Node *p_norm_handler(Graph *graph, Node *node) { auto *op = node->Op(); - auto keepdim = BOOST_GET_CONST(bool, op->GetAttr("keepdim")); - auto axis = BOOST_GET_CONST(int, op->GetAttr("axis")); - auto porder = BOOST_GET_CONST(float, op->GetAttr("porder")); + auto keepdim = PADDLE_GET_CONST(bool, op->GetAttr("keepdim")); + auto axis = PADDLE_GET_CONST(int, op->GetAttr("axis")); + auto porder = PADDLE_GET_CONST(float, op->GetAttr("porder")); auto target_dtype = ONNXDataType::FLOAT; if (GetInputVarNode("X", node)->Var()->GetDataType() == VarType::FP16) { diff --git a/paddle/fluid/platform/device/npu/npu_op_runner.cc b/paddle/fluid/platform/device/npu/npu_op_runner.cc index 99828a425517b0..7b3ea740655708 100644 --- a/paddle/fluid/platform/device/npu/npu_op_runner.cc +++ b/paddle/fluid/platform/device/npu/npu_op_runner.cc @@ -127,19 +127,19 @@ NpuOpRunner &NpuOpRunner::AddAttr(const std::string &name, } if (attr.type() == typeid(bool)) { PADDLE_ENFORCE_NPU_SUCCESS( - aclopSetAttrBool(attr_, name.c_str(), BOOST_GET_CONST(bool, attr))); + aclopSetAttrBool(attr_, name.c_str(), PADDLE_GET_CONST(bool, attr))); } else if (attr.type() == typeid(int)) { PADDLE_ENFORCE_NPU_SUCCESS( - aclopSetAttrInt(attr_, name.c_str(), BOOST_GET_CONST(int, attr))); + aclopSetAttrInt(attr_, name.c_str(), PADDLE_GET_CONST(int, attr))); } else if (attr.type() == typeid(int64_t)) { PADDLE_ENFORCE_NPU_SUCCESS( - aclopSetAttrInt(attr_, name.c_str(), BOOST_GET_CONST(int64_t, attr))); + aclopSetAttrInt(attr_, name.c_str(), PADDLE_GET_CONST(int64_t, attr))); } else if (attr.type() == typeid(float)) { PADDLE_ENFORCE_NPU_SUCCESS( - aclopSetAttrFloat(attr_, name.c_str(), BOOST_GET_CONST(float, attr))); + aclopSetAttrFloat(attr_, name.c_str(), PADDLE_GET_CONST(float, attr))); } else if (attr.type() == typeid(std::vector)) { - auto a = BOOST_GET_CONST(std::vector, attr); + auto a = PADDLE_GET_CONST(std::vector, attr); std::vector cast_a; for (auto it : a) { cast_a.push_back(static_cast(it)); @@ -147,7 +147,7 @@ NpuOpRunner &NpuOpRunner::AddAttr(const std::string &name, PADDLE_ENFORCE_NPU_SUCCESS(aclopSetAttrListBool( attr_, name.c_str(), cast_a.size(), cast_a.data())); } else if (attr.type() == typeid(std::vector)) { - auto a = BOOST_GET_CONST(std::vector, attr); + auto a = PADDLE_GET_CONST(std::vector, attr); std::vector cast_a; for (auto it : a) { cast_a.push_back(static_cast(it)); @@ -155,19 +155,19 @@ NpuOpRunner &NpuOpRunner::AddAttr(const std::string &name, 
PADDLE_ENFORCE_NPU_SUCCESS( aclopSetAttrListInt(attr_, name.c_str(), cast_a.size(), cast_a.data())); } else if (attr.type() == typeid(std::vector)) { - auto a = BOOST_GET_CONST(std::vector, attr); + auto a = PADDLE_GET_CONST(std::vector, attr); PADDLE_ENFORCE_NPU_SUCCESS( aclopSetAttrListInt(attr_, name.c_str(), a.size(), a.data())); } else if (attr.type() == typeid(std::vector)) { - auto a = BOOST_GET_CONST(std::vector, attr); + auto a = PADDLE_GET_CONST(std::vector, attr); PADDLE_ENFORCE_NPU_SUCCESS( aclopSetAttrListFloat(attr_, name.c_str(), a.size(), a.data())); } else if (attr.type() == typeid(std::string)) { - auto a = BOOST_GET_CONST(std::string, attr); + auto a = PADDLE_GET_CONST(std::string, attr); PADDLE_ENFORCE_NPU_SUCCESS( aclopSetAttrString(attr_, name.c_str(), a.c_str())); } else if (attr.type() == typeid(std::vector)) { - auto a = BOOST_GET_CONST(std::vector, attr); + auto a = PADDLE_GET_CONST(std::vector, attr); std::vector s; for (auto &it : a) { s.push_back(it.data()); @@ -175,7 +175,7 @@ NpuOpRunner &NpuOpRunner::AddAttr(const std::string &name, PADDLE_ENFORCE_NPU_SUCCESS( aclopSetAttrListString(attr_, name.c_str(), s.size(), s.data())); } else if (attr.type() == typeid(std::vector>)) { - auto a = BOOST_GET_CONST(std::vector>, attr); + auto a = PADDLE_GET_CONST(std::vector>, attr); std::vector data; std::vector num; for (auto &&v : a) { @@ -201,8 +201,8 @@ NpuOpRunner &NpuOpRunner::AddAttrDataType(const std::string &name, if (!attr_) { attr_ = aclopCreateAttr(); } - auto dtype = ConvertToNpuDtype( - static_cast(BOOST_GET_CONST(int, attr))); + auto dtype = ConvertToNpuDtype(static_cast( + PADDLE_GET_CONST(int, attr))); PADDLE_ENFORCE_NPU_SUCCESS(aclopSetAttrDataType(attr_, name.c_str(), dtype)); return *this; } diff --git a/paddle/fluid/platform/enforce.h b/paddle/fluid/platform/enforce.h index 6b33af9ac10bae..3cad2e3d2055e3 100644 --- a/paddle/fluid/platform/enforce.h +++ b/paddle/fluid/platform/enforce.h @@ -122,7 +122,7 @@ using namespace ::phi::enforce; // NOLINT #endif /* - * Summary: This BOOST_GET(_**) series macros are used to call paddle::get + * Summary: This PADDLE_GET(_**) series macros are used to call paddle::get * safely. paddle::get is not a completely safe api, although it will not * go wrong in most cases, but in extreme cases, it may fail and directly * throw a paddle::bad_variant_access const exception, without any stack @@ -137,17 +137,17 @@ using namespace ::phi::enforce; // NOLINT * * Examples: * - unsafe writing: int x = paddle::get(y); - * - safe writing: int x = BOOST_GET(int, y); + * - safe writing: int x = PADDLE_GET(int, y); * * Note: GCC 4.8 cannot select right overloaded function here, so need * to define different functions and macros here, after we upgreade - * CI gcc version, we can only define one BOOST_GET macro. + * CI gcc version, we can only define one PADDLE_GET macro. 
*/ namespace details { using namespace phi::enforce::details; // NOLINT -#define DEFINE_SAFE_BOOST_GET( \ +#define DEFINE_SAFE_PADDLE_GET( \ __InputType, __OutputType, __OutputTypePtr, __FuncName) \ template \ auto __FuncName( \ @@ -172,25 +172,25 @@ using namespace phi::enforce::details; // NOLINT } \ } -DEFINE_SAFE_BOOST_GET(InputType&, OutputType&, OutputType*, SafeBoostGet); -DEFINE_SAFE_BOOST_GET(const InputType&, - const OutputType&, - const OutputType*, - SafeBoostGetConst); -DEFINE_SAFE_BOOST_GET(InputType&&, - OutputType, - OutputType*, - SafeBoostGetMutable); +DEFINE_SAFE_PADDLE_GET(InputType&, OutputType&, OutputType*, SafeBoostGet); +DEFINE_SAFE_PADDLE_GET(const InputType&, + const OutputType&, + const OutputType*, + SafeBoostGetConst); +DEFINE_SAFE_PADDLE_GET(InputType&&, + OutputType, + OutputType*, + SafeBoostGetMutable); } // namespace details -#define BOOST_GET(__TYPE, __VALUE) \ +#define PADDLE_GET(__TYPE, __VALUE) \ paddle::platform::details::SafeBoostGet<__TYPE>( \ __VALUE, #__VALUE, __FILE__, __LINE__) -#define BOOST_GET_CONST(__TYPE, __VALUE) \ +#define PADDLE_GET_CONST(__TYPE, __VALUE) \ paddle::platform::details::SafeBoostGetConst<__TYPE>( \ __VALUE, #__VALUE, __FILE__, __LINE__) -#define BOOST_GET_MUTABLE(__TYPE, __VALUE) \ +#define PADDLE_GET_MUTABLE(__TYPE, __VALUE) \ paddle::platform::details::SafeBoostGetMutable<__TYPE>( \ __VALUE, #__VALUE, __FILE__, __LINE__) diff --git a/paddle/fluid/platform/enforce_test.cc b/paddle/fluid/platform/enforce_test.cc index 83e117ed836125..48aece1c4170b3 100644 --- a/paddle/fluid/platform/enforce_test.cc +++ b/paddle/fluid/platform/enforce_test.cc @@ -622,19 +622,19 @@ TEST(OP_INOUT_CHECK_MACRO, FAIL) { EXPECT_TRUE(caught_exception); } -TEST(BOOST_GET_SAFELY, SUCCESS) { +TEST(PADDLE_GET_SAFELY, SUCCESS) { paddle::framework::Attribute attr; attr = true; - bool rlt = BOOST_GET(bool, attr); + bool rlt = PADDLE_GET(bool, attr); EXPECT_EQ(rlt, true); } -TEST(BOOST_GET_SAFELY, FAIL) { +TEST(PADDLE_GET_SAFELY, FAIL) { paddle::framework::Attribute attr; attr = true; bool caught_exception = false; try { - BOOST_GET(int, attr); + PADDLE_GET(int, attr); } catch (paddle::platform::EnforceNotMet& error) { caught_exception = true; } diff --git a/paddle/fluid/platform/profiler.cc b/paddle/fluid/platform/profiler.cc index d02fd54578862b..59233568512498 100644 --- a/paddle/fluid/platform/profiler.cc +++ b/paddle/fluid/platform/profiler.cc @@ -270,7 +270,7 @@ RecordOpInfoSupplement::RecordOpInfoSupplement( auto iter = attrs.find( framework::OpProtoAndCheckerMaker::OpCreationCallstackAttrName()); if (iter != attrs.end()) { - callstack_ptr = &BOOST_GET_CONST(std::vector, iter->second); + callstack_ptr = &PADDLE_GET_CONST(std::vector, iter->second); callstack = *callstack_ptr; } HostEventRecorder::GetInstance().RecordEvent( @@ -301,7 +301,7 @@ RecordOpInfoSupplement::RecordOpInfoSupplement( auto iter = attrs.find( framework::OpProtoAndCheckerMaker::OpCreationCallstackAttrName()); if (iter != attrs.end()) { - callstack_ptr = &BOOST_GET_CONST(std::vector, iter->second); + callstack_ptr = &PADDLE_GET_CONST(std::vector, iter->second); callstack = *callstack_ptr; } HostEventRecorder::GetInstance().RecordEvent( diff --git a/paddle/fluid/pybind/pybind.cc b/paddle/fluid/pybind/pybind.cc index 40a03248cd22d7..4232f5c7485488 100644 --- a/paddle/fluid/pybind/pybind.cc +++ b/paddle/fluid/pybind/pybind.cc @@ -1684,9 +1684,9 @@ All parameter, weight, gradient are variables in Paddle. 
size_t index) -> py::object { auto &var = framework::GetFetchVariable(scope, var_name, index); if (data_is_lod_tensor(var)) { - return py::cast(BOOST_GET(LoDTensor, var)); + return py::cast(PADDLE_GET(LoDTensor, var)); } else { - return py::cast(BOOST_GET(LoDTensorArray, var)); + return py::cast(PADDLE_GET(LoDTensorArray, var)); } }); m.def("get_variable_tensor", framework::GetVariableTensor); @@ -1792,10 +1792,10 @@ All parameter, weight, gradient are variables in Paddle. py::list res(self.size()); for (size_t i = 0; i < self.size(); ++i) { if (data_is_lod_tensor(self[i])) { - auto &data = BOOST_GET(LoDTensor, self[i]); + auto &data = PADDLE_GET(LoDTensor, self[i]); res[i] = py::cast(std::move(data)); } else { - auto &data = BOOST_GET(LoDTensorArray, self[i]); + auto &data = PADDLE_GET(LoDTensorArray, self[i]); py::list tmp(data.size()); for (size_t j = 0; j < data.size(); ++j) { tmp[j] = py::cast(std::move(data[j])); @@ -1812,7 +1812,7 @@ All parameter, weight, gradient are variables in Paddle. "append", [](FetchList &self, const LoDTensor &t) { self.emplace_back(); - auto &lod_tensor = BOOST_GET(LoDTensor, self.back()); + auto &lod_tensor = PADDLE_GET(LoDTensor, self.back()); lod_tensor.ShareDataWith(t); lod_tensor.set_lod(t.lod()); }, @@ -1822,7 +1822,7 @@ All parameter, weight, gradient are variables in Paddle. "append", [](FetchList &self, const LoDTensorArray &t) { self.emplace_back(); - auto &lod_tensor_array = BOOST_GET(LoDTensorArray, self.back()); + auto &lod_tensor_array = PADDLE_GET(LoDTensorArray, self.back()); for (size_t i = 0; i < t.size(); ++i) { lod_tensor_array[i].ShareDataWith(t[i]); lod_tensor_array[i].set_lod(t[i].lod()); @@ -1841,10 +1841,10 @@ All parameter, weight, gradient are variables in Paddle. py::list tmp(self[i].size()); for (size_t j = 0; j < self[i].size(); ++j) { if (data_is_lod_tensor(self[i][j])) { - auto &var = BOOST_GET(LoDTensor, self[i][j]); + auto &var = PADDLE_GET(LoDTensor, self[i][j]); tmp[j] = py::cast(std::move(var)); } else { - auto &var = BOOST_GET(LoDTensorArray, self[i][j]); + auto &var = PADDLE_GET(LoDTensorArray, self[i][j]); py::list tmp_array(var.size()); for (size_t k = 0; k < var.size(); ++k) { tmp_array[k] = std::move(var[k]); diff --git a/paddle/infrt/host_context/mlir_to_runtime_translate.h b/paddle/infrt/host_context/mlir_to_runtime_translate.h index 64dc770489c4d6..212b13c3c0cfd0 100644 --- a/paddle/infrt/host_context/mlir_to_runtime_translate.h +++ b/paddle/infrt/host_context/mlir_to_runtime_translate.h @@ -16,7 +16,6 @@ #include -#include #include // NOLINT #include //NOLINT #include // NOLINT diff --git a/tools/check_file_diff_approvals.sh b/tools/check_file_diff_approvals.sh index 55b55faabf9933..1b0681759a749b 100644 --- a/tools/check_file_diff_approvals.sh +++ b/tools/check_file_diff_approvals.sh @@ -205,12 +205,6 @@ if [ ${HAS_CONST_CAST} ] && [ "${GIT_PR_ID}" != "" ]; then check_approval 1 46782768 12538138 6836917 22561442 6888866 16605440 fi -HAS_BOOST_GET=`git diff -U0 upstream/$BRANCH $FILTER |grep "^+" |grep -o -m 1 "boost::get" || true` -if [ ${HAS_BOOST_GET} ] && [ "${GIT_PR_ID}" != "" ]; then - echo_line="boost::get is not recommended, because it may throw an bad_get exception without any stack information, so please use BOOST_GET(_**)(dtype, value) series macros here. 
If these macros cannot meet your needs, please use try-catch to handle boost::get and request chenwhql (Recommend), luotao1 or lanxianghit review and approve.\n" - check_approval 1 6836917 47554610 22561442 -fi - # infrt needs to temporarily use LOG(FATAL) during the debugging period, and will replace it with standard error format in the future. NO_INFRT_FILES=`git diff --name-only upstream/develop | grep -v "tools/\|paddle/infrt/" || true` HAS_LOG_FATAL=`git diff -U0 upstream/$BRANCH $NO_INFRT_FILES |grep "^+" |grep -o -m 1 "LOG(FATAL)" || true` From bc02027fa1fc2f57e3fc5c9345a0cdf7355e7742 Mon Sep 17 00:00:00 2001 From: chenruibiao Date: Tue, 19 Jul 2022 10:27:38 +0800 Subject: [PATCH 2/2] Fix conflicts --- paddle/fluid/inference/tensorrt/convert/fill_constant_op.cc | 6 +++--- paddle/fluid/inference/tensorrt/op_teller.cc | 2 +- paddle/utils/tribool.h | 4 +--- 3 files changed, 5 insertions(+), 7 deletions(-) diff --git a/paddle/fluid/inference/tensorrt/convert/fill_constant_op.cc b/paddle/fluid/inference/tensorrt/convert/fill_constant_op.cc index 53eb3f2c897329..3bbbfe0374325a 100644 --- a/paddle/fluid/inference/tensorrt/convert/fill_constant_op.cc +++ b/paddle/fluid/inference/tensorrt/convert/fill_constant_op.cc @@ -27,11 +27,11 @@ class FillConstantOpConverter : public OpConverter { << "convert a fluid fill_constant op to tensorrt fill_constant layer"; framework::OpDesc op_desc(op, nullptr); - int dtype = BOOST_GET_CONST(int, op_desc.GetAttr("dtype")); + int dtype = PADDLE_GET_CONST(int, op_desc.GetAttr("dtype")); std::string str_value = - BOOST_GET_CONST(std::string, op_desc.GetAttr("str_value")); + PADDLE_GET_CONST(std::string, op_desc.GetAttr("str_value")); std::vector shape = - BOOST_GET_CONST(std::vector, op_desc.GetAttr("shape")); + PADDLE_GET_CONST(std::vector, op_desc.GetAttr("shape")); std::unique_ptr out_tensor(new framework::Tensor()); out_tensor->Resize(phi::make_ddim(shape)); nvinfer1::DataType trt_dtype = nvinfer1::DataType::kFLOAT; diff --git a/paddle/fluid/inference/tensorrt/op_teller.cc b/paddle/fluid/inference/tensorrt/op_teller.cc index 36a7ef535a0a68..222f698b6b7e25 100644 --- a/paddle/fluid/inference/tensorrt/op_teller.cc +++ b/paddle/fluid/inference/tensorrt/op_teller.cc @@ -1466,7 +1466,7 @@ bool OpTeller::Tell(const framework::ir::Node* node, fill_constant_inputs.end()) { if (desc.Input("ShapeTensorList").size()) return false; } - int dtype = BOOST_GET_CONST(int, desc.GetAttr("dtype")); + int dtype = PADDLE_GET_CONST(int, desc.GetAttr("dtype")); // only support int32, int64, float32 if (!(dtype == 2 || dtype == 3 || dtype == 5)) { return false; diff --git a/paddle/utils/tribool.h b/paddle/utils/tribool.h index 9ede76f3ec15eb..f08cc5805f1fc2 100644 --- a/paddle/utils/tribool.h +++ b/paddle/utils/tribool.h @@ -63,9 +63,7 @@ typedef bool (*indeterminate_keyword_t)(tribool, detail::indeterminate_t); * as a unary function that tells whether the tribool value is in the * "indeterminate" state. It's second role is as a keyword * representing the indeterminate (just like "true" and "false" - * represent the true and false states). If you do not like the name - * "indeterminate", and would prefer to use a different name, see the - * macro \c BOOST_TRIBOOL_THIRD_STATE. + * represent the true and false states). * * \returns x.value == tribool::indeterminate_value * \throws nothrow