[RISCV][VLOPT] Add support for mask-register logical instructions and set mask instructions #112231
Conversation
@llvm/pr-subscribers-backend-risc-v
Author: Michael Maitland (michaelmaitland)
Changes
Full diff: https://github.com/llvm/llvm-project/pull/112231.diff
2 Files Affected:
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index eb1f4df4ff7264..739db612ef5849 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -475,6 +475,28 @@ static OperandInfo getOperandInfo(const MachineInstr &MI,
return OperandInfo(EMUL, Log2EEW);
}
+ // 15. Vector Mask Instructions
+ // 15.1. Vector Mask-Register Logical Instructions
+ // 15.4. vmsbf.m set-before-first mask bit
+ // 15.5. vmsif.m set-including-first mask bit
+ // 15.6. vmsof.m set-only-first mask bit
+ // EEW=1 and EMUL=(EEW/SEW)*LMUL
+ // We handle the cases where the operand is a v0 mask operand above the
+ // switch, but these instructions may use non-v0 mask operands and need to be
+ // handled specifically.
+ case RISCV::VMAND_MM:
+ case RISCV::VMNAND_MM:
+ case RISCV::VMANDN_MM:
+ case RISCV::VMXOR_MM:
+ case RISCV::VMOR_MM:
+ case RISCV::VMNOR_MM:
+ case RISCV::VMORN_MM:
+ case RISCV::VMXNOR_MM:
+ case RISCV::VMSBF_M:
+ case RISCV::VMSIF_M:
+ case RISCV::VMSOF_M: {
+ return OperandInfo(RISCVVType::getEMULEqualsEEWDivSEWTimesLMUL(0, MI), 0);
+ }
+
default:
return {};
}
@@ -565,6 +587,22 @@ static bool isSupportedInstr(const MachineInstr &MI) {
// Vector Crypto
case RISCV::VWSLL_VI:
+
+ // 15. Vector Mask Instructions
+ // 15.1. Vector Mask-Register Logical Instructions
+ // 15.4. vmsbf.m set-before-first mask bit
+ // 15.5. vmsif.m set-including-first mask bit
+ // 15.6. vmsof.m set-only-first mask bit
+ case RISCV::VMAND_MM:
+ case RISCV::VMNAND_MM:
+ case RISCV::VMANDN_MM:
+ case RISCV::VMXOR_MM:
+ case RISCV::VMOR_MM:
+ case RISCV::VMNOR_MM:
+ case RISCV::VMORN_MM:
+ case RISCV::VMXNOR_MM:
+ case RISCV::VMSBF_M:
+ case RISCV::VMSIF_M:
+ case RISCV::VMSOF_M:
return true;
}
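
As background for the rule cited in the new getOperandInfo comment (EEW=1 and EMUL=(EEW/SEW)*LMUL for mask operands), here is a minimal standalone sketch of that arithmetic. It is illustrative only: the maskEMUL helper and the fraction representation are invented for this example and are not part of the pass.

```cpp
#include <cassert>
#include <cstdio>
#include <utility>

// Illustrative only: EMUL expressed as a reduced (numerator, denominator)
// fraction. For a mask operand EEW = 1, so EMUL = (1 / SEW) * LMUL.
static std::pair<unsigned, unsigned> maskEMUL(unsigned SEW, unsigned LMULNum,
                                              unsigned LMULDen) {
  assert(SEW && LMULNum && LMULDen && "invalid vtype parameters");
  unsigned Num = LMULNum;
  unsigned Den = LMULDen * SEW;
  // SEW and LMUL are powers of two, so reducing by 2 is enough.
  while (Num % 2 == 0 && Den % 2 == 0) {
    Num /= 2;
    Den /= 2;
  }
  return {Num, Den};
}

int main() {
  auto A = maskEMUL(/*SEW=*/8, /*LMULNum=*/1, /*LMULDen=*/1);  // e8, m1
  auto B = maskEMUL(/*SEW=*/32, /*LMULNum=*/4, /*LMULDen=*/1); // e32, m4
  std::printf("e8/m1  -> EMUL = %u/%u\n", A.first, A.second);  // 1/8 (mf8)
  std::printf("e32/m4 -> EMUL = %u/%u\n", B.first, B.second);  // 1/8 (mf8)
  return 0;
}
```

Both configurations reduce to EMUL = 1/8, since for an EEW=1 operand EMUL depends only on the SEW/LMUL ratio.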
diff --git a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
index 107252338829bd..7532cfdff2c148 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vl-opt-instrs.ll
@@ -971,3 +971,226 @@ define <vscale x 1 x i8> @vmerge_vvm(<vscale x 1 x i8> %a, i8 %b, <vscale x 1 x
%3 = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> %2, <vscale x 1 x i8> %c, <vscale x 1 x i1> %m, iXLen %vl)
ret <vscale x 1 x i8> %3
}
+
+
+define <vscale x 1 x i1> @vmand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmand_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmand_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmand.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmnand_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmnand_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmnand.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmnand_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmnand.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmnand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmandn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmandn_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmandn.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmandn_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmandn.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmandn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmxor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmxor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmxor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmxor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmxor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmxor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+
+define <vscale x 1 x i1> @vmnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmnor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmnor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmnor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmnor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmorn_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmorn_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmorn.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmorn_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmorn.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmorn.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmxnor_mm(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen %vl) {
+; NOVLOPT-LABEL: vmxnor_mm:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmxnor.mm v8, v0, v8
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmxnor_mm:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmxnor.mm v8, v0, v8
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmxnor.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %b, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmsbf_m(<vscale x 1 x i1> %a, iXLen %vl) {
+; NOVLOPT-LABEL: vmsbf_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsbf.m v8, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsbf_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsbf.m v8, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsbf.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmsif_m(<vscale x 1 x i1> %a, iXLen %vl) {
+; NOVLOPT-LABEL: vmsif_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsif.m v8, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsif_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsif.m v8, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsif.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
+define <vscale x 1 x i1> @vmsof_m(<vscale x 1 x i1> %a, iXLen %vl) {
+; NOVLOPT-LABEL: vmsof_m:
+; NOVLOPT: # %bb.0:
+; NOVLOPT-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmsof.m v8, v0
+; NOVLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; NOVLOPT-NEXT: vmand.mm v0, v0, v8
+; NOVLOPT-NEXT: ret
+;
+; VLOPT-LABEL: vmsof_m:
+; VLOPT: # %bb.0:
+; VLOPT-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
+; VLOPT-NEXT: vmsof.m v8, v0
+; VLOPT-NEXT: vmand.mm v0, v0, v8
+; VLOPT-NEXT: ret
+ %1 = call <vscale x 1 x i1> @llvm.riscv.vmsof.nxv1i1(<vscale x 1 x i1> %a, iXLen -1)
+ %2 = call <vscale x 1 x i1> @llvm.riscv.vmand.nxv1i1(<vscale x 1 x i1> %a, <vscale x 1 x i1> %1, iXLen %vl)
+ ret <vscale x 1 x i1> %2
+}
+
case RISCV::VMNOR_MM:
case RISCV::VMORN_MM:
case RISCV::VMXNOR_MM:
case RISCV::VMSBF_M:
The result of each element for vmsbf.m, vmsif.m, and vmsof.m is affected by that element and all previous elements in the source. That's different than and/nand/andn/xor/or/nor/orn/xnor. Does that need any special consideration in this pass?
There are 4 cases to consider for each of these instructions:
- There are no bits set
- There is a bit set on [1, %vl)
- The bit is set at %vl
- There is a bit set on [%vl + 1, VLMAX]
For vmsbf.m, case 1 behaves the same regardless of whether the VL is optimized, since [1, %vl] will be set in both cases. Case 2 also behaves the same: every bit before the first mask bit gets set, but not the mask bit itself, and the bits after it up to %vl do not get set. Case 3 also acts the same, since all bits [1, %vl) get set. Case 4 also performs the same -- imagine %vl + 1 is set; then [1, %vl] gets set, and since we never read %vl + 1, we don't care that it didn't get set. So it is safe to optimize this instruction.
For vmsif.m, case 1 behaves the same regardless of whether the VL is optimized, for the same reason as above. Case 2 also behaves the same -- imagine %vl - 1 is set; we can still set %vl - 1 and all bits before it. Case 3 also acts the same, since [1, %vl] will get set. Case 4 also acts the same -- imagine %vl + 1 is set; then [1, %vl] is set, and since we never read %vl + 1, we don't care whether it got set or not. So it is safe to optimize this instruction.
For vmsof.m, case 1 behaves the same, since no bits will be set on [1, %vl], just like in the non-optimized version. Case 2 also behaves the same, since we can set the active bit in [1, %vl) while all other bits on [1, %vl) stay zero. Case 3 also behaves the same, since %vl gets set but no other bits on [1, %vl) are active. Case 4 also behaves the same, since all bits [1, %vl] will be inactive, and we never read past %vl, so it doesn't matter whether the bit beyond it is set. So it is safe to optimize this instruction.
Based on the consideration of all of these cases, I think it needs no special consideration. I believe there is no issue if we replace VLMAX with some other %vl2 which is larger than %vl.
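
To make the case analysis above easy to audit mechanically, here is a small self-contained simulation. It is illustrative only: the MaskOp/runMaskOp names are invented, tail elements are simply left at zero rather than modeling tail-agnostic behavior, and 0-based indices stand in for the 1-based ranges used above. It checks that, for vmsbf.m, vmsif.m, and vmsof.m, the first %vl result bits are identical whether the instruction executes with %vl or with the larger VLMAX:

```cpp
#include <cassert>
#include <cstdint>
#include <initializer_list>
#include <vector>

// Which prefix-style mask operation to model.
enum class MaskOp { SBF, SIF, SOF };

// Compute the first `VL` result bits of the given operation over `Src`.
// Elements at index >= VL are left at 0 (they are never compared below).
static std::vector<uint8_t> runMaskOp(MaskOp Op, const std::vector<uint8_t> &Src,
                                      unsigned VL) {
  std::vector<uint8_t> Dst(Src.size(), 0);
  bool SeenFirst = false; // Have we passed the first set bit yet?
  for (unsigned I = 0; I < VL; ++I) {
    bool Bit = Src[I] != 0;
    switch (Op) {
    case MaskOp::SBF: // set-before-first: 1 strictly before the first set bit
      Dst[I] = !SeenFirst && !Bit;
      break;
    case MaskOp::SIF: // set-including-first: 1 up to and including the first set bit
      Dst[I] = !SeenFirst;
      break;
    case MaskOp::SOF: // set-only-first: 1 only at the first set bit
      Dst[I] = !SeenFirst && Bit;
      break;
    }
    SeenFirst |= Bit;
  }
  return Dst;
}

int main() {
  const unsigned VLMAX = 16, VL = 7;
  // Sweep the position of a single set bit so that all four cases above are
  // covered: no set bit, a set bit below %vl, at %vl, and beyond %vl.
  for (unsigned SetIdx = 0; SetIdx <= VLMAX; ++SetIdx) {
    std::vector<uint8_t> Src(VLMAX, 0);
    if (SetIdx < VLMAX)
      Src[SetIdx] = 1;
    for (MaskOp Op : {MaskOp::SBF, MaskOp::SIF, MaskOp::SOF}) {
      std::vector<uint8_t> Wide = runMaskOp(Op, Src, VLMAX); // original VL
      std::vector<uint8_t> Narrow = runMaskOp(Op, Src, VL);  // reduced VL
      for (unsigned I = 0; I < VL; ++I)
        assert(Wide[I] == Narrow[I] && "VL reduction changed a demanded bit");
    }
  }
  return 0;
}
```

Every combination of set-bit position and operation passes the assertion, mirroring the conclusion that the demanded bits never depend on source elements past %vl.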
The way I'm thinking about it is that reducing the VL for these instructions doesn't change the result of the active elements. And the only demanded elements should be the active elements, so it shouldn't affect the result.
ping
Now that we have testing of all instructions in the isSupportedInstr switch, and better coverage of getOperandInfo, I think it is a good time to enable this by default. I'd like for llvm#112231 and llvm#119416 to land before this patch, so it'd be great for anyone reviewing this to check those out first.
Force-pushed from 40179b3 to 40cfd39.
; CHECK: %x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, 1, 3 /* e8 */
; CHECK-NEXT: %y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 3 /* e8 */
%x:vr = PseudoVMAND_MM_M1 $noreg, $noreg, -1, 3 /* e8 */
%y:vr = PseudoVMAND_MM_M1 $noreg, %x, 1, 3 /* e8 */
The EEW for a VMAND should always be 0. We use 0 to mark it as a mask instruction for the vsetvli insertion pass.
Updated.
Force-pushed from 6274d30 to 2999459.
There are deeper problems. The Log2SEW read from the SEWOp in getEMULEqualsEEWDivSEWTimesLMUL, getOperandInfo, etc. needs to be changed from 0 to 3 to make the SEW/LMUL ratio correct. It appears to work correctly because we are only propagating from a mask instruction to another mask instruction. If we ever want to propagate from a mask operand to a mask instruction it is wrong. Do we support propagating from a mask instruction to a compare instruction like vmseq?
I have a patch for that locally, but we currently return Unknown for it to stop propagation.
LGTM
We need to adjust getEMULEqualsEEWDivSEWTimesLMUL to account for the fact that Log2EEW for mask instructions is 0 but their EMUL is calculated using Log2EEW=3.
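
To spell out the ratio discrepancy being described here (purely illustrative; log2EMUL is a throwaway helper, not the pass's getEMULEqualsEEWDivSEWTimesLMUL):

```cpp
#include <cstdio>

// Illustrative only: compute EMUL = (EEW / SEW) * LMUL as a power-of-two
// exponent, i.e. log2(EMUL) = Log2EEW - Log2SEW + Log2LMUL.
static int log2EMUL(int Log2EEW, int Log2SEW, int Log2LMUL) {
  return Log2EEW - Log2SEW + Log2LMUL;
}

int main() {
  const int Log2SEW = 3;  // SEW = e8
  const int Log2LMUL = 0; // LMUL = m1
  // Treating the mask EEW as 1 (Log2EEW = 0) yields log2(EMUL) = -3, i.e. mf8.
  std::printf("Log2EEW=0 -> log2(EMUL) = %d\n", log2EMUL(0, Log2SEW, Log2LMUL));
  // Treating it as 8 (Log2EEW = 3) yields log2(EMUL) = 0, i.e. m1 = LMUL.
  std::printf("Log2EEW=3 -> log2(EMUL) = %d\n", log2EMUL(3, Log2SEW, Log2LMUL));
  return 0;
}
```

With SEW=e8 and LMUL=m1, Log2EEW=0 gives EMUL=mf8 while Log2EEW=3 gives EMUL=m1, which is the gap the adjustment needs to account for.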