
Commit 3ac969c: Fix

co63oc committed Apr 29, 2024
1 parent 8325d5d
Showing 8 changed files with 77 additions and 345 deletions.
285 changes: 0 additions & 285 deletions paddle/fluid/operators/cudnn_lstm_op.cc

This file was deleted.

59 changes: 0 additions & 59 deletions paddle/fluid/operators/ops_signature/cudnn_lstm_sig.cc

This file was deleted.
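Both deletions are part of the same cleanup: paddle/fluid/operators/cudnn_lstm_op.cc held the legacy fluid definition of the operator, and paddle/fluid/operators/ops_signature/cudnn_lstm_sig.cc held the hand-written mapping from that fluid op onto the phi kernel. Once the operator and its gradient are declared in the YAML op definitions (see the backward.yaml hunk below), that mapping is generated automatically and both files become dead code. For context, an argument-mapping file of this kind typically looked like the sketch below; the deleted contents are not expanded in this diff, so the input/attribute/output names are hypothetical placeholders following the fluid naming convention.

// Illustrative sketch of a phi argument-mapping file like cudnn_lstm_sig.cc.
// The real file was deleted by this commit and its contents are collapsed in
// this view; the name lists below are hypothetical placeholders.
#include "paddle/phi/core/compat/op_utils.h"

namespace phi {

KernelSignature CudnnLSTMOpArgumentMapping(const ArgumentMappingContext& ctx) {
  // Maps the fluid op's inputs, attributes, and outputs onto the parameter
  // order expected by the phi "cudnn_lstm" kernel.
  return KernelSignature(
      "cudnn_lstm",
      {"Input", "InitH", "InitC", "W", "WeightList", "SequenceLength"},
      {"dropout_prob", "is_bidirec", "hidden_size", "num_layers", "is_test",
       "seed"},
      {"Out", "LastH", "LastC", "Reserve", "StateOut"});
}

}  // namespace phi

PD_REGISTER_ARG_MAPPING_FN(cudnn_lstm, phi::CudnnLSTMOpArgumentMapping);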

@@ -281,9 +281,12 @@ def GenBuildOutputsPart2(
   }}
 """
 
+# In the cudnn_lstm operator, the output weight_list_grad requires the optional
+# input weight_list, so "pir::VectorType {name}" is declared outside the "if" block.
 CREATE_OPTIONAL_INPUT_VEC_METATENSOR_TEMPLATE = """  std::vector<paddle::dialect::IrTensor> vec_ir_tensor_{name};
+  pir::VectorType {name};
   if ({name}_.impl() != nullptr) {{
-    pir::VectorType {name} = {name}_.type().dyn_cast<pir::VectorType>();
+    {name} = {name}_.type().dyn_cast<pir::VectorType>();
     for (size_t i=0; i < static_cast<size_t>({name}.size()); i++) {{
       if({name}[i].isa<paddle::dialect::DenseTensorType>()) {{
         auto {name}_type = {name}[i].dyn_cast<paddle::dialect::DenseTensorType>();
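The generator change is a C++ scoping fix in the emitted build code: pir::VectorType {name} used to be declared inside the if ({name}_.impl() != nullptr) block, so code emitted after that block, such as the part that sizes weight_list_grad from weight_list, could not see the variable. Moving the declaration out keeps it in scope whether or not the optional input was provided. A minimal, self-contained C++ sketch of the pattern, with hypothetical stand-in types in place of the real PIR classes:

#include <cstddef>
#include <iostream>
#include <vector>

// Hypothetical stand-ins for pir::VectorType and an optional dialect input;
// they only mirror the shape of the generated code, not the real PIR API.
struct VectorType {
  std::vector<int> elems;
  size_t size() const { return elems.size(); }
};

struct OptionalInput {
  const VectorType* impl_ = nullptr;  // null when the optional input is absent
  const VectorType* impl() const { return impl_; }
};

int main() {
  OptionalInput weight_list_;  // optional input; absent in this run
  VectorType weight_list;      // declared OUTSIDE the if, as in the fix
  if (weight_list_.impl() != nullptr) {
    weight_list = *weight_list_.impl();  // filled only when the input exists
  }
  // Because the declaration sits outside the if, code after the block can
  // still use the variable, e.g. to size weight_list_grad; here it is empty.
  std::cout << "weight_list_grad slots: " << weight_list.size() << "\n";
  return 0;
}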
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/backward.yaml
@@ -519,6 +519,17 @@
     func : cross_grad
     data_type : out_grad
 
+- backward_op : cudnn_lstm_grad
+  forward: cudnn_lstm (Tensor x, Tensor init_h, Tensor init_c, Tensor w, Tensor[] weight_list, Tensor sequence_length, float dropout_prob = 0.0, bool is_bidirec = false, int hidden_size = 100, int num_layers = 1, bool is_test = false, int seed = 0) -> Tensor (out), Tensor (last_h), Tensor (last_c), Tensor (reserve), Tensor (state_out)
+  args: (Tensor x, Tensor init_h, Tensor init_c, Tensor[] weight_list, Tensor sequence_length, Tensor out, Tensor reserve, Tensor state_out, Tensor out_grad, Tensor last_h_grad, Tensor last_c_grad, float dropout_prob = 0.0, bool is_bidirec = false, int hidden_size = 100, int num_layers = 1, bool is_test = false, int seed = 0)
+  output: Tensor (x_grad), Tensor (init_h_grad), Tensor (init_c_grad), Tensor[](weight_list_grad){weight_list.size()}
+  infer_meta:
+    func: CudnnLSTMGradInferMeta
+    param : [x, init_h, init_c, weight_list]
+  kernel:
+    func: cudnn_lstm_grad
+  optional: weight_list, sequence_length, weight_list_grad
+
 - backward_op : cummax_grad
   forward : cummax(Tensor x, int axis=-1, DataType dtype = DataType::INT64) -> Tensor(out), Tensor(indices)
   args : (Tensor x, Tensor indices, Tensor out_grad, int axis, DataType dtype)
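The optional: line is what forces the generator change above: weight_list may be absent at runtime, yet weight_list_grad is sized from it. Under phi's usual YAML-to-C++ mapping (a plain Tensor input becomes const DenseTensor&, an optional input becomes paddle::optional, and a Tensor[] output becomes a vector of output pointers), this entry implies a gradient-kernel signature along the following lines; treat it as a sketch rather than the authoritative declaration, which lives under paddle/phi/kernels:

// Sketch of the kernel signature implied by the cudnn_lstm_grad YAML entry.
// Intended to compile inside the Paddle source tree; parameter order follows
// the yaml convention: inputs, then attributes, then outputs.
#include <vector>
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/utils/optional.h"

namespace phi {

template <typename T, typename Context>
void CudnnLSTMGradKernel(
    const Context& dev_ctx,
    const DenseTensor& x,
    const DenseTensor& init_h,
    const DenseTensor& init_c,
    const paddle::optional<std::vector<const DenseTensor*>>& weight_list,
    const paddle::optional<DenseTensor>& sequence_length,
    const DenseTensor& out,
    const DenseTensor& reserve,
    const DenseTensor& state_out,
    const DenseTensor& out_grad,
    const DenseTensor& last_h_grad,
    const DenseTensor& last_c_grad,
    float dropout_prob,
    bool is_bidirec,
    int hidden_size,
    int num_layers,
    bool is_test,
    int seed,
    DenseTensor* x_grad,
    DenseTensor* init_h_grad,
    DenseTensor* init_c_grad,
    std::vector<DenseTensor*> weight_list_grad);

}  // namespace phi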
(The remaining changed files are collapsed and not shown in this view.)