diff --git a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
index 4bcb0edc4b093..0b8407943a907 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetMachine.cpp
@@ -105,7 +105,7 @@ static cl::opt<bool> EnablePostMISchedLoadStoreClustering(
 static cl::opt<bool>
     EnableVLOptimizer("riscv-enable-vl-optimizer",
                       cl::desc("Enable the RISC-V VL Optimizer pass"),
-                      cl::init(false), cl::Hidden);
+                      cl::init(true), cl::Hidden);
 
 static cl::opt<bool> DisableVectorMaskMutation(
     "riscv-disable-vector-mask-mutation",
diff --git a/llvm/test/CodeGen/RISCV/O3-pipeline.ll b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
index 8fd9ae9850366..b0c756e26985b 100644
--- a/llvm/test/CodeGen/RISCV/O3-pipeline.ll
+++ b/llvm/test/CodeGen/RISCV/O3-pipeline.ll
@@ -119,6 +119,8 @@
 ; RV64-NEXT:       RISC-V Optimize W Instructions
 ; CHECK-NEXT:      RISC-V Pre-RA pseudo instruction expansion pass
 ; CHECK-NEXT:      RISC-V Merge Base Offset
+; CHECK-NEXT:      MachineDominator Tree Construction
+; CHECK-NEXT:      RISC-V VL Optimizer
 ; CHECK-NEXT:      RISC-V Insert Read/Write CSR Pass
 ; CHECK-NEXT:      RISC-V Insert Write VXRM Pass
 ; CHECK-NEXT:      RISC-V Landing Pad Setup
@@ -129,7 +131,6 @@
 ; CHECK-NEXT:      Live Variable Analysis
 ; CHECK-NEXT:      Eliminate PHI nodes for register allocation
 ; CHECK-NEXT:      Two-Address instruction pass
-; CHECK-NEXT:      MachineDominator Tree Construction
 ; CHECK-NEXT:      Slot index numbering
 ; CHECK-NEXT:      Live Interval Analysis
 ; CHECK-NEXT:      Register Coalescer
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
index ce4bc48dff042..6f515996677ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/ctlz-vp.ll
@@ -2654,9 +2654,8 @@ define <vscale x 1 x i9> @vp_ctlo_zero_undef_nxv1i9(<vscale x 1 x i9> %va, <vsc
 define <vscale x 1 x i9> @vp_ctlo_zero_undef_nxv1i9(<vscale x 1 x i9> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-buildvec.ll
@@ … @@ define <4 x i8> @buildvec_not_vid_v4i8_2() {
 define <16 x i8> @buildvec_not_vid_v16i8() {
 ; CHECK-LABEL: buildvec_not_vid_v16i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
+; CHECK-NEXT:    vsetivli zero, 7, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v9, 3
+; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v8, 0
 ; CHECK-NEXT:    vsetivli zero, 7, e8, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v8, v9, 6
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index 10156141119a7..0bd8466669dc8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -348,8 +348,9 @@ define <8 x i8> @splat_ve4_ins_i0ve2(<8 x i8> %v) {
 define <8 x i8> @splat_ve4_ins_i1ve3(<8 x i8> %v) {
 ; CHECK-LABEL: splat_ve4_ins_i1ve3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v9, 3
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v10, 4
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf2, tu, ma
 ; CHECK-NEXT:    vslideup.vi v10, v9, 1
@@ -432,8 +433,9 @@ define <8 x i8> @splat_ve2_we0_ins_i2ve4(<8 x i8> %v, <8 x i8> %w) {
 define <8 x i8> @splat_ve2_we0_ins_i2we4(<8 x i8> %v, <8 x i8> %w) {
 ; CHECK-LABEL: splat_ve2_we0_ins_i2we4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v10, 4
+; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.i v11, 0
 ; CHECK-NEXT:    li a0, 70
 ; CHECK-NEXT:    vsetivli zero, 3, e8, mf2, tu, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
index cba8de82ec41b..59c7feb53ce94 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int.ll
@@ -1100,15 +1100,17 @@ define void @mulhu_v8i16(ptr %x) {
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vle16.v v8, (a0)
 ; CHECK-NEXT:    vmv.v.i v9, 0
+; CHECK-NEXT:    vsetivli zero, 7, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v10, 1
 ; CHECK-NEXT:    li a1, 33
 ; CHECK-NEXT:    vmv.s.x v0, a1
 ; CHECK-NEXT:    lui a1, %hi(.LCPI66_0)
 ; CHECK-NEXT:    addi a1, a1, %lo(.LCPI66_0)
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.i v11, 3
 ; CHECK-NEXT:    vle16.v v12, (a1)
 ; CHECK-NEXT:    vmerge.vim v11, v11, 2, v0
-; CHECK-NEXT:    vmv.v.i v13, 0
+; CHECK-NEXT:    vmv1r.v v13, v9
 ; CHECK-NEXT:    vsetivli zero, 7, e16, m1, tu, ma
 ; CHECK-NEXT:    vslideup.vi v9, v10, 6
 ; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll
index 66f95b7077672..abbbfe8f252fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-shuffle-changes-length.ll
@@ -97,8 +97,9 @@ define <4 x i32> @v4i32_v8i32(<8 x i32>) {
 define <4 x i32> @v4i32_v16i32(<16 x i32>) {
 ; RV32-LABEL: v4i32_v16i32:
 ; RV32:       # %bb.0:
-; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; RV32-NEXT:    vsetivli zero, 2, e16, m1, ta, ma
 ; RV32-NEXT:    vmv.v.i v12, 1
+; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
 ; RV32-NEXT:    vmv.v.i v14, 6
 ; RV32-NEXT:    li a0, 32
 ; RV32-NEXT:    vmv.v.i v0, 10
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
index 2392cb77c753d..61cc754e21df8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll
@@ -184,9 +184,8 @@ define <vscale x 1 x i1> @icmp_uge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 define <vscale x 1 x i1> @icmp_uge_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_uge_vx_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
@@ -348,9 +347,8 @@ define <vscale x 1 x i1> @icmp_sge_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1
 define <vscale x 1 x i1> @icmp_sge_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sge_vx_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
@@ -470,9 +468,8 @@ define <vscale x 1 x i1> @icmp_sle_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vsc
 define <vscale x 1 x i1> @icmp_sle_vx_swap_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sle_vx_swap_nxv1i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
@@ -543,10 +540,9 @@ define <vscale x 8 x i1> @icmp_eq_vv_nxv8i7(<vscale x 8 x i7> %va, <vscale x 8
   %v = call <vscale x 8 x i1> @llvm.vp.icmp.nxv8i7(<vscale x 8 x i7> %va, <vscale x 8 x i7> %vb, metadata !"eq", <vscale x 8 x i1> %m, i32 %evl)
@@ -557,11 +553,10 @@ define <vscale x 8 x i1> @icmp_eq_vx_nxv8i7(<vscale x 8 x i7> %va, i7 %b, <vsca
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
@@ -574,11 +569,10 @@ define <vscale x 8 x i1> @icmp_eq_vx_swap_nxv8i7(<vscale x 8 x i7> %va, i7 %b, <
 ; CHECK-LABEL: icmp_eq_vx_swap_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2
 ; CHECK-NEXT:    vand.vx v9, v9, a2
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmseq.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
@@ -764,9 +758,8 @@ define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_uge_vx_nxv8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
@@ -928,9 +921,8 @@ define <vscale x 8 x i1> @icmp_sge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8
 define <vscale x 8 x i1> @icmp_sge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sge_vx_nxv8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
@@ -1050,9 +1042,8 @@ define <vscale x 8 x i1> @icmp_sle_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vsc
 define <vscale x 8 x i1> @icmp_sle_vx_swap_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sle_vx_swap_nxv8i8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
@@ -1377,9 +1368,8 @@ define <vscale x 1 x i1> @icmp_uge_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 define <vscale x 1 x i1> @icmp_uge_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_uge_vx_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsleu.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
@@ -1541,9 +1531,8 @@ define <vscale x 1 x i1> @icmp_sge_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x
 define <vscale x 1 x i1> @icmp_sge_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sge_vx_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
@@ -1663,9 +1652,8 @@ define <vscale x 1 x i1> @icmp_sle_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <
 define <vscale x 1 x i1> @icmp_sle_vx_swap_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sle_vx_swap_nxv1i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, mf2, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
@@ -1887,9 +1875,8 @@ define <vscale x 8 x i1> @icmp_uge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 define <vscale x 8 x i1> @icmp_uge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_uge_vx_nxv8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vmsleu.vv v12, v16, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
@@ -2066,9 +2053,8 @@ define <vscale x 8 x i1> @icmp_sge_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x
 define <vscale x 8 x i1> @icmp_sge_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sge_vx_nxv8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vmsle.vv v12, v16, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
@@ -2199,9 +2185,8 @@ define <vscale x 8 x i1> @icmp_sle_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <
 define <vscale x 8 x i1> @icmp_sle_vx_swap_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: icmp_sle_vx_swap_nxv8i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
-; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vmv.v.x v16, a0
 ; CHECK-NEXT:    vmsle.vv v12, v16, v8, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v12
 ; CHECK-NEXT:    ret
@@ -2644,9 +2629,8 @@ define <vscale x 1 x i1> @icmp_uge_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
@@ -2898,9 +2882,8 @@ define <vscale x 1 x i1> @icmp_sge_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
@@ -3095,9 +3078,8 @@ define <vscale x 1 x i1> @icmp_sle_vx_swap_nxv1i64(<vscale x 1 x i64> %va, i64 %
 ;
 ; RV64-LABEL: icmp_sle_vx_swap_nxv1i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a2, zero, e64, m1, ta, ma
-; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
 ; RV64-NEXT:    vmsle.vv v0, v9, v8, v0.t
 ; RV64-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
@@ -3431,9 +3413,8 @@ define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <
@@ … @@ define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <
@@ … @@ define <vscale x 8 x i1> @icmp_sle_vx_swap_nxv8i64(<vscale x 8 x i64> %va, i64 %
 ;
 ; RV64-LABEL: icmp_sle_vx_swap_nxv8i64:
 ; RV64:       # %bb.0:
-; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
-; RV64-NEXT:    vmv.v.x v24, a0
 ; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    vmv.v.x v24, a0
 ; RV64-NEXT:    vmsle.vv v16, v24, v8, v0.t
 ; RV64-NEXT:    vmv1r.v v0, v16
 ; RV64-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
index c7b5200979370..2814be2792de9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vdiv-vp.ll
@@ -11,9 +11,7 @@ define <vscale x 8 x i7> @vdiv_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
@@ … @@ define <vscale x 8 x i7> @vdivu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
 ; CHECK-LABEL: vdivu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
 ; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
 ; CHECK-NEXT:    vdivu.vv v8, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
index 7ca1983e8b32c..ab67e9833c78a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfma-vp.ll
@@ -4301,10 +4301,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat(<vscale x 1 x half> %va
 ; ZVFHMIN-LABEL: vfnmadd_vf_nxv1f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v10, v10, a1, v0.t
 ; ZVFHMIN-NEXT:    vxor.vx v9, v9, a1, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf4, ta, ma
@@ -4334,10 +4333,9 @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_commute(
@@ … @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked(
@@ … @@ define <vscale x 1 x half> @vfnmadd_vf_nxv1f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat(<vscale x 1 x half> %va
 ; ZVFHMIN-LABEL: vfnmsub_vf_nxv1f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, mf4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v9, v10, a1, v0.t
@@ -4701,9 +4698,10 @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_commute(
@@ … @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked(
@@ … @@ define <vscale x 1 x half> @vfnmsub_vf_nxv1f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat(<vscale x 2 x half> %va
 ; ZVFHMIN-LABEL: vfnmadd_vf_nxv2f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v10, v10, a1, v0.t
 ; ZVFHMIN-NEXT:    vxor.vx v9, v9, a1, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, mf2, ta, ma
@@ -5253,10 +5252,9 @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_commute(
@@ … @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked(
@@ … @@ define <vscale x 2 x half> @vfnmadd_vf_nxv2f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat(<vscale x 2 x half> %va
 ; ZVFHMIN-LABEL: vfnmsub_vf_nxv2f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v11, v9
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v9, v10, a1, v0.t
@@ -5620,9 +5617,10 @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_commute(
@@ … @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked(
@@ … @@ define <vscale x 2 x half> @vfnmsub_vf_nxv2f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat(<vscale x 4 x half> %va
 ; ZVFHMIN-LABEL: vfnmadd_vf_nxv4f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v10, v10, a1, v0.t
 ; ZVFHMIN-NEXT:    vxor.vx v9, v9, a1, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
@@ -6172,10 +6171,9 @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_commute(
@@ … @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked(
@@ … @@ define <vscale x 4 x half> @vfnmadd_vf_nxv4f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat(<vscale x 4 x half> %va
 ; ZVFHMIN-LABEL: vfnmsub_vf_nxv4f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v10, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v12, v9
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v9, v10, a1, v0.t
@@ -6539,9 +6536,10 @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_commute(
@@ … @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked(
@@ … @@ define <vscale x 4 x half> @vfnmsub_vf_nxv4f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat(<vscale x 8 x half> %va
 ; ZVFHMIN-LABEL: vfnmadd_vf_nxv8f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v12, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v12, v12, a1, v0.t
 ; ZVFHMIN-NEXT:    vxor.vx v10, v10, a1, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
@@ -7091,10 +7090,9 @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_commute(
@@ … @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked(
@@ … @@ define <vscale x 8 x half> @vfnmadd_vf_nxv8f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat(<vscale x 8 x half> %va
 ; ZVFHMIN-LABEL: vfnmsub_vf_nxv8f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v12, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v16, v10
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v10, v12, a1, v0.t
@@ -7458,9 +7455,10 @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_commute(
@@ … @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked(
@@ … @@ define <vscale x 8 x half> @vfnmsub_vf_nxv8f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat(
 ; ZVFHMIN-NEXT:    addi a1, sp, 16
 ; ZVFHMIN-NEXT:    vs4r.v v8, (a1) # Unknown-size Folded Spill
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
-; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v16, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v4, v16, a1, v0.t
 ; ZVFHMIN-NEXT:    vxor.vx v12, v12, a1, v0.t
 ; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
@@ -8152,12 +8151,11 @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_commute(
@@ … @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked(
@@ … @@ define <vscale x 16 x half> @vfnmadd_vf_nxv16f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat(
 ;
 ; ZVFHMIN-LABEL: vfnmsub_vf_nxv16f16_neg_splat:
 ; ZVFHMIN:       # %bb.0:
-; ZVFHMIN-NEXT:    vsetvli a1, zero, e16, m4, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vmv4r.v v4, v8
 ; ZVFHMIN-NEXT:    fmv.x.h a1, fa0
 ; ZVFHMIN-NEXT:    vmv.v.x v16, a1
 ; ZVFHMIN-NEXT:    lui a1, 8
+; ZVFHMIN-NEXT:    vsetvli a2, zero, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vfwcvt.f.f.v v24, v12
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v12, v16, a1, v0.t
@@ -8592,9 +8589,10 @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_commute(
@@ … @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked(
@@ … @@ define <vscale x 16 x half> @vfnmsub_vf_nxv16f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat(
 ; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
 ; ZVFHMIN-NEXT:    lui a4, 8
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    vsetvli a3, zero, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vmv.v.x v8, a2
 ; ZVFHMIN-NEXT:    mv a3, a0
-; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; ZVFHMIN-NEXT:    vxor.vx v8, v8, a4, v0.t
 ; ZVFHMIN-NEXT:    vxor.vx v16, v16, a4, v0.t
 ; ZVFHMIN-NEXT:    slli a2, a1, 1
@@ -10848,10 +10847,9 @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_commute(
@@ … @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked(
@@ … @@ define <vscale x 32 x half> @vfnmadd_vf_nxv32f16_neg_splat_unmasked_commute(
@@ … @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat(
 ; ZVFHMIN-NEXT:    fmv.x.h a2, fa0
 ; ZVFHMIN-NEXT:    lui a3, 8
 ; ZVFHMIN-NEXT:    csrr a1, vlenb
-; ZVFHMIN-NEXT:    vsetvli a4, zero, e16, m8, ta, ma
-; ZVFHMIN-NEXT:    vmv.v.x v16, a2
 ; ZVFHMIN-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
+; ZVFHMIN-NEXT:    vmv.v.x v16, a2
 ; ZVFHMIN-NEXT:    vxor.vx v24, v16, a3, v0.t
 ; ZVFHMIN-NEXT:    slli a2, a1, 1
 ; ZVFHMIN-NEXT:    mv a3, a0
@@ -12641,9 +12638,8 @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_commute(
@@ … @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked(
@@ … @@ define <vscale x 32 x half> @vfnmsub_vf_nxv32f16_neg_splat_unmasked_commute(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmax-vp.ll
@@ … @@ define <vscale x 8 x i7> @vmax_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
@@ … @@ define <vscale x 8 x i7> @vmaxu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
 ; CHECK-LABEL: vmaxu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
 ; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
 ; CHECK-NEXT:    vmaxu.vv v8, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
index 3441934fb1550..79631cd80594c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmin-vp.ll
@@ -11,9 +11,7 @@ define <vscale x 8 x i7> @vmin_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
@@ … @@ define <vscale x 8 x i7> @vminu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
 ; CHECK-LABEL: vminu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
 ; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
 ; CHECK-NEXT:    vminu.vv v8, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
index 0b23314efde22..b63098b64e292 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmul-vp.ll
@@ -1448,11 +1448,10 @@ define <vscale x 8 x i64> @vmul_vadd_vx_nxv8i64_unmasked(<vscale x 8 x i64> %va,
 ; CHECK-LABEL: vmul_vadd_vx_nxv8i64_unmasked:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a1, 21
-; CHECK-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
-; CHECK-NEXT:    vmv.v.x v16, a1
-; CHECK-NEXT:    li a1, 7
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
-; CHECK-NEXT:    vmadd.vx v8, a1, v16
+; CHECK-NEXT:    vmv.v.x v16, a1
+; CHECK-NEXT:    li a0, 7
+; CHECK-NEXT:    vmadd.vx v8, a0, v16
 ; CHECK-NEXT:    ret
   %head = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
   %m = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
diff --git a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
index ba6d95c5a43b7..3273274a70b41 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrem-vp.ll
@@ -11,9 +11,7 @@ define <vscale x 8 x i7> @vrem_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
@@ … @@ define <vscale x 8 x i7> @vremu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
 ; CHECK-LABEL: vremu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2, v0.t
 ; CHECK-NEXT:    vand.vx v9, v9, a2, v0.t
 ; CHECK-NEXT:    vremu.vv v8, v8, v9, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
index 3421c6af334bc..575d041b091dd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsadd-vp.ll
@@ -9,10 +9,9 @@ declare <vscale x 8 x i7> @llvm.vp.sadd.sat.nxv8i7(<vscale x 8 x i7>, <vscale x
 define <vscale x 8 x i7> @vsadd_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vsadd_vx_nxv8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
index 180e0799044e8..c9ed72bc63da2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsaddu-vp.ll
@@ -10,11 +10,10 @@ define <vscale x 8 x i7> @vsaddu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b,
 ; CHECK-LABEL: vsaddu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2
 ; CHECK-NEXT:    vand.vx v9, v9, a2
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    vminu.vx v8, v8, a2, v0.t
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index 80dd87ce5da85..5b577dc0f8df9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -1125,7 +1125,6 @@ exit:
 define <vscale x 4 x i32> @clobbered_forwarded_avl(i64 %n, <vscale x 4 x i32> %v, i1 %cmp) {
 ; CHECK-LABEL: clobbered_forwarded_avl:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    mv a2, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    andi a1, a1, 1
 ; CHECK-NEXT:  .LBB27_1: # %for.body
@@ -1133,9 +1132,7 @@ define <vscale x 4 x i32> @clobbered_forwarded_avl(i64 %n, <vscale x 4 x i32> %v
 ; CHECK-NEXT:    addi a0, a0, 1
 ; CHECK-NEXT:    bnez a1, .LBB27_1
 ; CHECK-NEXT:  # %bb.2: # %for.cond.cleanup
-; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v10, v8, v8
-; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v10, v8
 ; CHECK-NEXT:    ret
 entry:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
index 380835494ed17..f5c46aec86b86 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vshl-vp.ll
@@ -9,10 +9,9 @@ declare <vscale x 8 x i7> @llvm.vp.shl.nxv8i7(<vscale x 8 x i7>, <vscale x 8 x
 define <vscale x 8 x i7> @vsll_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vsll_vx_nxv8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    li a0, 127
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
 ; CHECK-NEXT:    vsll.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 44d3ee96f5e61..001f744503523 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -7,10 +7,10 @@
 define <vscale x 2 x bfloat> @vsitofp_nxv2bf16_nxv2i7(<vscale x 2 x i7> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsitofp_nxv2bf16_nxv2i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsext.vf2 v9, v8, v0.t
 ; CHECK-NEXT:    vfwcvt.f.x.v v10, v9, v0.t
 ; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
@@ -140,10 +140,10 @@ define <vscale x 2 x half> @vsitofp_nxv2f16_nxv2i7(<vscale x 2 x i7> %va, <vsca
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-sdnode.ll
@@ … @@ define <vscale x 8 x i32> @vsra_vi_mask_nxv8i32(<vscale x 8 x i32> %va, <vscale
 define <vscale x 1 x i8> @vsra_vv_nxv1i8_sext_zext_mixed_trunc(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsra_vv_nxv1i8_sext_zext_mixed_trunc:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsext.vf4 v9, v8
 ; CHECK-NEXT:    vzext.vf4 v10, v8
 ; CHECK-NEXT:    vsra.vv v8, v9, v10
 ; CHECK-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 0
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v8, v8, 0, v0.t
 ; CHECK-NEXT:    ret
   %sexted_va = sext <vscale x 1 x i8> %va to <vscale x 1 x i32>
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
index 208063bfd2342..961689b15b839 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsra-vp.ll
@@ -11,10 +11,8 @@ define <vscale x 8 x i7> @vsra_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsrl-vp.ll
@@ … @@ define <vscale x 8 x i7> @vsrl_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssub-vp.ll
@@ … @@ declare <vscale x 8 x i7> @llvm.vp.ssub.sat.nxv8i7(<vscale x 8 x i7>, <vscale x
 define <vscale x 8 x i7> @vssub_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
 ; CHECK-LABEL: vssub_vx_nxv8i7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vadd.vv v8, v8, v8
 ; CHECK-NEXT:    vsra.vi v8, v8, 1
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsub.vx v8, v8, a0, v0.t
 ; CHECK-NEXT:    li a0, 63
 ; CHECK-NEXT:    vmin.vx v8, v8, a0, v0.t
@@ -62,9 +61,8 @@ define <vscale x 1 x i8> @vssub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale
 define <vscale x 1 x i8> @vssub_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vssub_vx_nxv1i8_commute:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vssub.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
index 7674a457ca961..b602f11e2c805 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssubu-vp.ll
@@ -10,11 +10,10 @@ define <vscale x 8 x i7> @vssubu_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b,
 ; CHECK-LABEL: vssubu_vx_nxv8i7:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    li a2, 127
-; CHECK-NEXT:    vsetvli a3, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vand.vx v8, v8, a2
 ; CHECK-NEXT:    vand.vx v9, v9, a2
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vssubu.vv v8, v8, v9, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
@@ -60,9 +59,8 @@ define <vscale x 1 x i8> @vssubu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscal
 define <vscale x 1 x i8> @vssubu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vssubu_vx_nxv1i8_commute:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
-; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vssubu.vv v8, v9, v8, v0.t
 ; CHECK-NEXT:    ret
   %elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index f17cdd9c72bfc..06d85193e3b61 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -8,9 +8,9 @@ define <vscale x 2 x bfloat> @vuitofp_nxv2bf16_nxv2i7(<vscale x 2 x i7> %va, <v
@@ … @@ define <vscale x 2 x half> @vuitofp_nxv2f16_nxv2i7(<vscale x 2 x i7> %va, <vsca
diff --git a/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vwsll-vp.ll
@@ … @@ declare <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64>, <vscale x 2
 define <vscale x 2 x i64> @vwsll_vv_nxv2i64_sext(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vv_nxv2i64_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -35,10 +34,9 @@ define <vscale x 2 x i64> @vwsll_vv_nxv2i64_sext(<vscale x 2 x i32> %a, <vscale
 define <vscale x 2 x i64> @vwsll_vv_nxv2i64_zext(<vscale x 2 x i32> %a, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vv_nxv2i64_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -57,17 +55,15 @@ define <vscale x 2 x i64> @vwsll_vv_nxv2i64_zext(<vscale x 2 x i32> %a, <vscale
 define <vscale x 2 x i64> @vwsll_vx_i64_nxv2i64(<vscale x 2 x i32> %a, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-RV32-LABEL: vwsll_vx_i64_nxv2i64:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
+; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV32-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vwsll_vx_i64_nxv2i64:
 ; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a2, zero, e64, m2, ta, ma
-; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
+; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV64-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV64-NEXT:    ret
 ;
@@ -94,12 +90,11 @@ define <vscale x 2 x i64> @vwsll_vx_i64_nxv2i64(<vscale x 2 x i32> %a, i64 %b, <
 define <vscale x 2 x i64> @vwsll_vx_i32_nxv2i64_sext(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i32_nxv2i64_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -120,12 +115,11 @@ define <vscale x 2 x i64> @vwsll_vx_i32_nxv2i64_sext(<vscale x 2 x i32> %a, i32
 define <vscale x 2 x i64> @vwsll_vx_i32_nxv2i64_zext(<vscale x 2 x i32> %a, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i32_nxv2i64_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -146,12 +140,11 @@ define <vscale x 2 x i64> @vwsll_vx_i32_nxv2i64_zext(<vscale x 2 x i32> %a, i32
 define <vscale x 2 x i64> @vwsll_vx_i16_nxv2i64_sext(<vscale x 2 x i32> %a, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i16_nxv2i64_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf4 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -172,12 +165,11 @@ define <vscale x 2 x i64> @vwsll_vx_i16_nxv2i64_sext(<vscale x 2 x i32> %a, i16
 define <vscale x 2 x i64> @vwsll_vx_i16_nxv2i64_zext(<vscale x 2 x i32> %a, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i16_nxv2i64_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf4 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -198,12 +190,11 @@ define <vscale x 2 x i64> @vwsll_vx_i16_nxv2i64_zext(<vscale x 2 x i32> %a, i16
 define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_sext(<vscale x 2 x i32> %a, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i8_nxv2i64_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf8 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -224,12 +215,11 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_sext(<vscale x 2 x i32> %a, i8 %b
 define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i8_nxv2i64_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf8 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -250,9 +240,8 @@ define <vscale x 2 x i64> @vwsll_vx_i8_nxv2i64_zext(<vscale x 2 x i32> %a, i8 %b
 define <vscale x 2 x i64> @vwsll_vi_nxv2i64(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vi_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsll.vi v8, v10, 2, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -276,10 +265,9 @@ declare <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32>, <vscale x 4
 define <vscale x 4 x i32> @vwsll_vv_nxv4i32_sext(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vv_nxv4i32_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -298,10 +286,9 @@ define <vscale x 4 x i32> @vwsll_vv_nxv4i32_sext(<vscale x 4 x i16> %a, <vscale
 define <vscale x 4 x i32> @vwsll_vv_nxv4i32_zext(<vscale x 4 x i16> %a, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vv_nxv4i32_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -320,17 +307,15 @@ define <vscale x 4 x i32> @vwsll_vv_nxv4i32_zext(<vscale x 4 x i16> %a, <vscale
 define <vscale x 4 x i32> @vwsll_vx_i64_nxv4i32(<vscale x 4 x i16> %a, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-RV32-LABEL: vwsll_vx_i64_nxv4i32:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
+; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV32-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vwsll_vx_i64_nxv4i32:
 ; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
-; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV64-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV64-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV64-NEXT:    ret
 ;
@@ -358,9 +343,8 @@ define <vscale x 4 x i32> @vwsll_vx_i64_nxv4i32(<vscale x 4 x i16> %a, i64 %b, <
 define <vscale x 4 x i32> @vwsll_vx_i32_nxv4i32(<vscale x 4 x i16> %a, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i32_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -380,12 +364,11 @@ define <vscale x 4 x i32> @vwsll_vx_i32_nxv4i32(<vscale x 4 x i16> %a, i32 %b, <
 define <vscale x 4 x i32> @vwsll_vx_i16_nxv4i32_sext(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i16_nxv4i32_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -406,12 +389,11 @@ define <vscale x 4 x i32> @vwsll_vx_i16_nxv4i32_sext(<vscale x 4 x i16> %a, i16
 define <vscale x 4 x i32> @vwsll_vx_i16_nxv4i32_zext(<vscale x 4 x i16> %a, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i16_nxv4i32_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -432,12 +414,11 @@ define <vscale x 4 x i32> @vwsll_vx_i16_nxv4i32_zext(<vscale x 4 x i16> %a, i16
 define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_sext(<vscale x 4 x i16> %a, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i8_nxv4i32_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf4 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -458,12 +439,11 @@ define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_sext(<vscale x 4 x i16> %a, i8 %b
 define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i8_nxv4i32_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf4 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -484,9 +464,8 @@ define <vscale x 4 x i32> @vwsll_vx_i8_nxv4i32_zext(<vscale x 4 x i16> %a, i8 %b
 define <vscale x 4 x i32> @vwsll_vi_nxv4i32(<vscale x 4 x i16> %a, <vscale x 4 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vi_nxv4i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsll.vi v8, v10, 2, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -511,10 +490,9 @@ declare <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16>, <vscale x 8
 define <vscale x 8 x i16> @vwsll_vv_nxv8i16_sext(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vv_nxv8i16_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -533,10 +511,9 @@ define <vscale x 8 x i16> @vwsll_vv_nxv8i16_sext(<vscale x 8 x i8> %a, <vscale
 define <vscale x 8 x i16> @vwsll_vv_nxv8i16_zext(<vscale x 8 x i8> %a, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vv_nxv8i16_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -555,17 +532,15 @@ define <vscale x 8 x i16> @vwsll_vv_nxv8i16_zext(<vscale x 8 x i8> %a, <vscale
 define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-RV32-LABEL: vwsll_vx_i64_nxv8i16:
 ; CHECK-RV32:       # %bb.0:
-; CHECK-RV32-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV32-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
+; CHECK-RV32-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV32-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV32-NEXT:    ret
 ;
 ; CHECK-RV64-LABEL: vwsll_vx_i64_nxv8i16:
 ; CHECK-RV64:       # %bb.0:
-; CHECK-RV64-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
-; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV64-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-RV64-NEXT:    vzext.vf2 v10, v8
 ; CHECK-RV64-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-RV64-NEXT:    ret
 ;
@@ -593,9 +568,8 @@ define <vscale x 8 x i16> @vwsll_vx_i64_nxv8i16(<vscale x 8 x i8> %a, i64 %b, <
 define <vscale x 8 x i16> @vwsll_vx_i32_nxv8i16(<vscale x 8 x i8> %a, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i32_nxv8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -616,9 +590,8 @@ define <vscale x 8 x i16> @vwsll_vx_i32_nxv8i16(<vscale x 8 x i8> %a, i32 %b, <
 define <vscale x 8 x i16> @vwsll_vx_i16_nxv8i16(<vscale x 8 x i8> %a, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i16_nxv8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsll.vx v8, v10, a0, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -638,12 +611,11 @@ define <vscale x 8 x i16> @vwsll_vx_i16_nxv8i16(<vscale x 8 x i8> %a, i16 %b, <
 define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_sext(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i8_nxv8i16_sext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -664,12 +636,11 @@ define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_sext(<vscale x 8 x i8> %a, i8 %b,
 define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vx_i8_nxv8i16_zext:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a2, zero, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vmv.v.x v9, a0
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
 ; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vzext.vf2 v12, v9
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsll.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
 ;
@@ -690,9 +661,8 @@ define <vscale x 8 x i16> @vwsll_vx_i8_nxv8i16_zext(<vscale x 8 x i8> %a, i8 %b,
 define <vscale x 8 x i16> @vwsll_vi_nxv8i16(<vscale x 8 x i8> %a, <vscale x 8 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vwsll_vi_nxv8i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vzext.vf2 v10, v8
 ; CHECK-NEXT:    vsll.vi v8, v10, 2, v0.t
 ; CHECK-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
index 4c9d9e5ffdf77..42c87c9660dc9 100644
--- a/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
+++ b/llvm/test/CodeGen/RISCV/srem-seteq-illegal-types.ll
@@ -664,14 +664,15 @@ define void @test_srem_vec(ptr %X) nounwind {
 ; RV32MV-NEXT:    vslidedown.vi v10, v10, 2
 ; RV32MV-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32MV-NEXT:    vand.vv v8, v10, v8
-; RV32MV-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32MV-NEXT:    vsetivli zero, 3, e8, mf2, ta, ma
 ; RV32MV-NEXT:    vmv.v.i v10, 1
+; RV32MV-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; RV32MV-NEXT:    vmv.v.i v11, 0
 ; RV32MV-NEXT:    vsetivli zero, 3, e8, mf2, tu, ma
 ; RV32MV-NEXT:    vslideup.vi v11, v10, 2
-; RV32MV-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
+; RV32MV-NEXT:    vsetivli zero, 5, e8, mf2, ta, ma
 ; RV32MV-NEXT:    vmv.v.i v10, 2
-; RV32MV-NEXT:    vsetivli zero, 5, e8, mf2, tu, ma
+; RV32MV-NEXT:    vsetvli zero, zero, e8, mf2, tu, ma
 ; RV32MV-NEXT:    vslideup.vi v11, v10, 4
 ; RV32MV-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32MV-NEXT:    vsext.vf4 v12, v11