diff --git a/compiler/plugins/input/Torch/InputConversion/BindSymbolicShapes.cpp b/compiler/plugins/input/Torch/InputConversion/BindSymbolicShapes.cpp
index f8e13f2fa0e8..f1b0793b5b91 100644
--- a/compiler/plugins/input/Torch/InputConversion/BindSymbolicShapes.cpp
+++ b/compiler/plugins/input/Torch/InputConversion/BindSymbolicShapes.cpp
@@ -281,11 +281,11 @@ class BindSymbolicShapesPass final
     case AffineExprKind::Mul:
       return builder.create<arith::MulIOp>(loc, lhs, rhs);
     case AffineExprKind::Mod:
-      return builder.create<arith::RemUIOp>(loc, lhs, rhs);
+      return builder.create<arith::RemSIOp>(loc, lhs, rhs);
     case AffineExprKind::FloorDiv:
-      return builder.create<arith::DivUIOp>(loc, lhs, rhs);
+      return builder.create<arith::DivSIOp>(loc, lhs, rhs);
     case AffineExprKind::CeilDiv:
-      return builder.create<arith::CeilDivUIOp>(loc, lhs, rhs);
+      return builder.create<arith::CeilDivSIOp>(loc, lhs, rhs);
     default:
       break;
     }
diff --git a/compiler/plugins/input/Torch/InputConversion/test/bind_symbolic_shapes.mlir b/compiler/plugins/input/Torch/InputConversion/test/bind_symbolic_shapes.mlir
index 699b6dbf6d60..335ca3056a1c 100644
--- a/compiler/plugins/input/Torch/InputConversion/test/bind_symbolic_shapes.mlir
+++ b/compiler/plugins/input/Torch/InputConversion/test/bind_symbolic_shapes.mlir
@@ -113,8 +113,8 @@ module @add_expr {
 // CHECK-LABEL: @mod_expr
 module @mod_expr {
   func.func @main(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) {
-    // CHECK: remui
-    // CHECK-NOT: udiv
+    // CHECK: remsi
+    // CHECK-NOT: sdiv
     %0 = torch.symbolic_int "s0" {min_val = 0, max_val = 1024} : !torch.int
     %1 = torch.symbolic_int "s1" {min_val = 0, max_val = 1024} : !torch.int
     torch.bind_symbolic_shape %arg0, [%0, %1], affine_map<()[s0, s1] -> (s0, s1)> : !torch.vtensor<[?,?],f32>
@@ -127,8 +127,8 @@ module @mod_expr {
 // CHECK-LABEL: @floordiv_expr
 module @floordiv_expr {
   func.func @main(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !torch.vtensor<[?,?],f32>) {
-    // CHECK: divui
-    // CHECK-NOT: udiv
+    // CHECK: divsi
+    // CHECK-NOT: sdiv
     %0 = torch.symbolic_int "s0" {min_val = 0, max_val = 1024} : !torch.int
     %1 = torch.symbolic_int "s1" {min_val = 0, max_val = 1024} : !torch.int
     torch.bind_symbolic_shape %arg0, [%0, %1], affine_map<()[s0, s1] -> (s0, s1)> : !torch.vtensor<[?,?],f32>
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/block_dynamic_dims.mlir b/compiler/src/iree/compiler/Codegen/Common/test/block_dynamic_dims.mlir
index 7f252f25e882..7cfcd54af84b 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/block_dynamic_dims.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/block_dynamic_dims.mlir
@@ -66,11 +66,11 @@ func.func @block_attention_dims() {
 // CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
 // CHECK-DAG: %[[M:.+]] = flow.dispatch.workload.ordinal %{{.+}}, 0 : index
 // CHECK-DAG: %[[K2:.+]] = flow.dispatch.workload.ordinal %{{.+}}, 1 : index
-// CHECK-DAG: %[[M_DYNAMIC:.+]] = arith.divui %[[M]], %[[C16]]
+// CHECK-DAG: %[[M_DYNAMIC:.+]] = arith.divsi %[[M]], %[[C16]]
 // CHECK: %[[Q_BINDING:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: binding(0)
 // CHECK-SAME: !flow.dispatch.tensor>{%[[M_DYNAMIC]]}
-// CHECK: %[[K2_DYNAMIC:.+]] = arith.divui %[[K2]], %[[C32]]
+// CHECK: %[[K2_DYNAMIC:.+]] = arith.divsi %[[K2]], %[[C32]]
 // CHECK: %[[K_BINDING:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: binding(1)
 // CHECK-SAME: !flow.dispatch.tensor>{%[[K2_DYNAMIC]]}
diff --git a/compiler/src/iree/compiler/Codegen/Common/test/propagate_reshapes_by_expansion.mlir b/compiler/src/iree/compiler/Codegen/Common/test/propagate_reshapes_by_expansion.mlir
index abced1727ced..db715dea9d07 100644
--- a/compiler/src/iree/compiler/Codegen/Common/test/propagate_reshapes_by_expansion.mlir
+++ b/compiler/src/iree/compiler/Codegen/Common/test/propagate_reshapes_by_expansion.mlir
@@ -57,7 +57,7 @@ func.func @fold_expand_into_loads_dynamic() -> tensor<2x?x16x32xf32> {
 // CHECK-LABEL: func @fold_expand_into_loads_dynamic()
 // CHECK-DAG: %[[C16:.+]] = arith.constant 16 : index
 // CHECK-DAG: %[[CONST:.+]] = hal.interface.constant.load
-// CHECK: %[[SHAPE:.+]] = arith.divui %[[CONST]], %[[C16]]
+// CHECK: %[[SHAPE:.+]] = arith.divsi %[[CONST]], %[[C16]]
 // CHECK: %[[SUBSPAN:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: !flow.dispatch.tensor>{%[[SHAPE]]}
 // CHECK: %[[LOAD:.+]] = flow.dispatch.tensor.load %[[SUBSPAN]]
@@ -81,7 +81,7 @@ func.func @fold_collapse_into_stores_dynamic(%arg0 : tensor<2x?x32xf32>) {
 // CHECK-LABEL: func @fold_collapse_into_stores_dynamic(
 // CHECK-DAG: %[[C2:.+]] = arith.constant 2 : index
 // CHECK: %[[CONST:.+]] = hal.interface.constant.load
-// CHECK: %[[SHAPE:.+]] = arith.divui %[[CONST]], %[[C2]]
+// CHECK: %[[SHAPE:.+]] = arith.divsi %[[CONST]], %[[C2]]
 // CHECK: %[[SUBSPAN:.+]] = hal.interface.binding.subspan
 // CHECK-SAME: !flow.dispatch.tensor>{%[[SHAPE]]}
 // CHECK: flow.dispatch.tensor.store %{{.+}}, %[[SUBSPAN]]
diff --git a/compiler/src/iree/compiler/DispatchCreation/test/attention_fuse_by_expansion.mlir b/compiler/src/iree/compiler/DispatchCreation/test/attention_fuse_by_expansion.mlir
index 247fc7ea5cda..0367d2c386c5 100644
--- a/compiler/src/iree/compiler/DispatchCreation/test/attention_fuse_by_expansion.mlir
+++ b/compiler/src/iree/compiler/DispatchCreation/test/attention_fuse_by_expansion.mlir
@@ -197,18 +197,17 @@ util.func public @attention_dynamic(%arg0: tensor<?x?x?xf16>, %arg1: tensor<?x?x
 // CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
 // CHECK-DAG: %[[D2:.+]] = tensor.dim %[[ARG0]], %[[C2]]
 // CHECK-DAG: %[[D4:.+]] = tensor.dim %[[ARG2]], %[[C2]]
-// CHECK-DAG: %[[SPLIT0:.+]] = arith.divui %[[D0]]
-// CHECK-DAG: %[[VAL:.+]] = affine.apply affine_map<()[s0] -> (s0 floordiv 2)>()[%[[D0]]]
-// CHECK-DAG: %[[EMPTY:.+]] = tensor.empty(%[[VAL]], %[[D1]], %[[D4]]) : tensor<2x?x?x?xf16>
+// CHECK-DAG: %[[SPLIT0:.+]] = arith.divsi %[[D0]]
+// CHECK-DAG: %[[EMPTY:.+]] = tensor.empty(%[[SPLIT0]], %[[D1]], %[[D4]]) : tensor<2x?x?x?xf16>
 // CHECK-DAG: %[[QUERY:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT0]], %[[D1]], %[[D2]]]
 // CHECK-DAG: %[[D5:.+]] = tensor.dim %[[ARG1]], %[[C0]]
 // CHECK-DAG: %[[D6:.+]] = tensor.dim %[[ARG1]], %[[C1]]
 // CHECK-DAG: %[[D7:.+]] = tensor.dim %[[ARG1]], %[[C2]]
-// CHECK-DAG: %[[SPLIT1:.+]] = arith.divui %[[D5]], %[[C2]]
+// CHECK-DAG: %[[SPLIT1:.+]] = arith.divsi %[[D5]], %[[C2]]
 // CHECK-DAG: %[[KEY:.+]] = tensor.expand_shape %[[ARG1]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT1]], %[[D6]], %[[D7]]]
 // CHECK-DAG: %[[D8:.+]] = tensor.dim %[[ARG2]], %[[C0]]
 // CHECK-DAG: %[[D9:.+]] = tensor.dim %[[ARG2]], %[[C1]]
-// CHECK-DAG: %[[SPLIT2:.+]] = arith.divui %[[D8]], %[[C2]]
+// CHECK-DAG: %[[SPLIT2:.+]] = arith.divsi %[[D8]], %[[C2]]
 // CHECK-DAG: %[[CACHE:.+]] = tensor.expand_shape %[[ARG2]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT2]], %[[D9]], %[[D4]]]
 // CHECK: %[[ATTENTION:.+]] = iree_linalg_ext.attention
 // CHECK-SAME: indexing_maps =
@@ -263,23 +262,22 @@ util.func public @attention_dynamic_masked(%arg0: tensor<?x?x?xf16>, %arg1: tens
 // CHECK-DAG: %[[D1:.+]] = tensor.dim %[[ARG0]], %[[C1]]
 // CHECK-DAG: %[[D2:.+]] = tensor.dim %[[ARG0]], %[[C2]]
 // CHECK-DAG: %[[D4:.+]] = tensor.dim %[[ARG2]], %[[C2]]
-// CHECK-DAG: %[[SPLIT0:.+]] = arith.divui %[[D0]]
-// CHECK-DAG: %[[VAL:.+]] = affine.apply affine_map<()[s0] -> (s0 floordiv 2)>()[%[[D0]]]
-// CHECK-DAG: %[[EMPTY:.+]] = tensor.empty(%[[VAL]], %[[D1]], %[[D4]]) : tensor<2x?x?x?xf16>
+// CHECK-DAG: %[[SPLIT0:.+]] = arith.divsi %[[D0]]
+// CHECK-DAG: %[[EMPTY:.+]] = tensor.empty(%[[SPLIT0]], %[[D1]], %[[D4]]) : tensor<2x?x?x?xf16>
 // CHECK-DAG: %[[QUERY:.+]] = tensor.expand_shape %[[ARG0]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT0]], %[[D1]], %[[D2]]]
 // CHECK-DAG: %[[D5:.+]] = tensor.dim %[[ARG1]], %[[C0]]
 // CHECK-DAG: %[[D6:.+]] = tensor.dim %[[ARG1]], %[[C1]]
 // CHECK-DAG: %[[D7:.+]] = tensor.dim %[[ARG1]], %[[C2]]
-// CHECK-DAG: %[[SPLIT1:.+]] = arith.divui %[[D5]], %[[C2]]
+// CHECK-DAG: %[[SPLIT1:.+]] = arith.divsi %[[D5]], %[[C2]]
 // CHECK-DAG: %[[KEY:.+]] = tensor.expand_shape %[[ARG1]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT1]], %[[D6]], %[[D7]]]
 // CHECK-DAG: %[[D8:.+]] = tensor.dim %[[ARG2]], %[[C0]]
 // CHECK-DAG: %[[D9:.+]] = tensor.dim %[[ARG2]], %[[C1]]
-// CHECK-DAG: %[[SPLIT2:.+]] = arith.divui %[[D8]], %[[C2]]
+// CHECK-DAG: %[[SPLIT2:.+]] = arith.divsi %[[D8]], %[[C2]]
 // CHECK-DAG: %[[CACHE:.+]] = tensor.expand_shape %[[ARG2]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT2]], %[[D9]], %[[D4]]]
 // CHECK-DAG: %[[D10:.+]] = tensor.dim %[[ARG4]], %[[C0]]
 // CHECK-DAG: %[[D11:.+]] = tensor.dim %[[ARG4]], %[[C1]]
 // CHECK-DAG: %[[D12:.+]] = tensor.dim %[[ARG4]], %[[C2]]
-// CHECK-DAG: %[[SPLIT3:.+]] = arith.divui %[[D10]], %[[C2]]
+// CHECK-DAG: %[[SPLIT3:.+]] = arith.divsi %[[D10]], %[[C2]]
 // CHECK-DAG: %[[MASK:.+]] = tensor.expand_shape %[[ARG4]] {{\[}}[0, 1], [2], [3]{{\]}} output_shape [2, %[[SPLIT3]], %[[D11]], %[[D12]]]
 // CHECK: %[[ATTENTION:.+]] = iree_linalg_ext.attention
 // CHECK-SAME: indexing_maps =
diff --git a/compiler/src/iree/compiler/GlobalOptimization/test/linalg_quantized_conv_to_conv.mlir b/compiler/src/iree/compiler/GlobalOptimization/test/linalg_quantized_conv_to_conv.mlir
index d537f7c5308d..aa3f2c8b6e22 100644
--- a/compiler/src/iree/compiler/GlobalOptimization/test/linalg_quantized_conv_to_conv.mlir
+++ b/compiler/src/iree/compiler/GlobalOptimization/test/linalg_quantized_conv_to_conv.mlir
@@ -292,12 +292,12 @@ func.func @conv2d_all_dyn(%arg0: tensor<?x?x?x?xi8>, %arg1: tensor<?x?x?x?xi8>,
   // CHECK-SAME: iterator_types = ["parallel", "parallel", "parallel", "reduction"]}
   // CHECK-SAME: ins(%arg0 : tensor<?x?x?x?xi8>)
   // CHECK-SAME: outs(%[[FILL]] : tensor<?x?x?xi32>)
+  // CHECK: %[[DIM00:.+]] = tensor.dim %arg0, %[[I0]]
   // CHECK: %[[EXPAND:.+]] = tensor.expand_shape %[[SUMI]] {{\[\[}}0], [1], [2, 3]]
-  // CHECK-DAG: %[[DIM0:.+]] = tensor.dim %arg0, %[[I0]]
   // CHECK-DAG: %[[DIM1:.+]] = tensor.dim %arg2, %[[I1]]
   // CHECK-DAG: %[[DIM2:.+]] = tensor.dim %arg2, %[[I2]]
-  // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM0]], %[[DIM1]], %[[DIM2]])
+  // CHECK: %[[EMPTY:.+]] = tensor.empty(%[[DIM00]], %[[DIM1]], %[[DIM2]])
   // CHECK: %[[FILL:.+]] = linalg.fill ins(%[[C0]] : i32) outs(%9 : tensor<?x?x?xi32>)
   // CHECK-DAG: %[[DIM0:.+]] = tensor.dim %arg1, %[[I0]] : tensor<?x?x?x?xi8>
   // CHECK-DAG: %[[DIM1:.+]] = tensor.dim %arg1, %[[I1]] : tensor<?x?x?x?xi8>
diff --git a/third_party/llvm-project b/third_party/llvm-project
index 47f99c8d32e7..6b1f50b29d70 160000
--- a/third_party/llvm-project
+++ b/third_party/llvm-project
@@ -1 +1 @@
-Subproject commit 47f99c8d32e71911d9d2a880bb66d8e98fa1b0cf
+Subproject commit 6b1f50b29d7084ca418e6c1ee5bf44030cf6b1ef