From 276a28f838c633e7daf84e97d83089a36bbac80d Mon Sep 17 00:00:00 2001
From: imkiva
Date: Wed, 19 Jun 2024 13:57:55 +0800
Subject: [PATCH 1/6] [LLVM][XTHeadVector] support `vbool16/32/64` for vector mask operations

---
 .../RISCV/RISCVInstrInfoXTHeadVPseudos.td | 35 ++++++++++++++-----
 1 file changed, 26 insertions(+), 9 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
index 4eb5a68534c59..4aafd174d4900 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoXTHeadVPseudos.td
@@ -145,15 +145,32 @@ defset list AllWidenableIntToFloatXVectors = {
   def : VTypeInfoToWide;
 }
+class XMTypeInfo {
+  ValueType Mask = Mas;
+  // {SEW, VLMul} values set a valid VType to deal with this mask type.
+  int SEW = Sew;
+  int Log2SEW = Log2Sew;
+  LMULInfo LMul = M;
+  string BX = Bx; // Appendix of mask operations.
+  // The pattern fragment which produces the AVL operand, representing the
+  // "natural" vector length for this mask type. For scalable masks this is
+  // VLMax.
+  OutPatFrag AVL = VLMax;
+}
+
 // Redefine `AllMasks` from RISCVInstrInfoVPseudos.td to remove fractionally-grouped register groups.
-// TODO: riscv-v-intrinsics.pdf declares there are functions accepting vbool<16,32,64>_t, but they need
-// to be connected to MF2, MF4, MF8, which are not supported by the 'V' extension 0.7.1.
-defset list<MTypeInfo> AllXMasks = {
+defset list<XMTypeInfo> AllXMasks = {
   // vbool<n>_t, <n> = SEW/LMUL, we assume SEW=8 and corresponding LMUL.
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
-  def : MTypeInfo;
+  def : XMTypeInfo;
+  def : XMTypeInfo;
+  def : XMTypeInfo;
+  def : XMTypeInfo;
+
+  // Cannot assume SEW=8, as <n> = SEW/LMUL, so LMUL = MF2/MF4/MF8, which is not supported.
+  // Instead, we assume LMUL=1, so SEW = <n> * LMUL.
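+  // Worked example, derived from the relation above and from the e16/e32/e64,
+  // m1 vsetvli settings checked in the updated tests: with LMUL = 1,
+  // vbool16_t uses SEW = 16, vbool32_t uses SEW = 32, and vbool64_t uses SEW = 64.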
+ def : XMTypeInfo; + def : XMTypeInfo; + def : XMTypeInfo; } class GetXVTypePredicates { @@ -3981,7 +3998,7 @@ multiclass XVPatUnaryS_M : + XMTypeInfo mti> : Pat<(mti.Mask (!cast(intrinsic_name) (mti.Mask VR:$rs2), VLOpFrag)), @@ -3992,7 +4009,7 @@ class XVPatMaskUnaryNoMask : + XMTypeInfo mti> : Pat<(mti.Mask (!cast(intrinsic_name#"_mask") (mti.Mask VR:$merge), (mti.Mask VR:$rs2), From 1a276deb69906c237ceb6c7aae5d6ef159c0a3fb Mon Sep 17 00:00:00 2001 From: imkiva Date: Wed, 19 Jun 2024 14:20:38 +0800 Subject: [PATCH 2/6] [LLVM][XTHeadVector] correctly expand pseudos for `vmset/vmclr` --- llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp index e8c1658281cdb..d9591eb43dd1f 100644 --- a/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp +++ b/llvm/lib/Target/RISCV/RISCVExpandPseudoInsts.cpp @@ -146,18 +146,24 @@ bool RISCVExpandPseudo::expandMI(MachineBasicBlock &MBB, case RISCV::PseudoVMSET_M_B64: // vmset.m vd => vmxnor.mm vd, vd, vd return expandVMSET_VMCLR(MBB, MBBI, RISCV::VMXNOR_MM); + case RISCV::PseudoTH_VMCLR_M_B1: + case RISCV::PseudoTH_VMCLR_M_B2: + case RISCV::PseudoTH_VMCLR_M_B4: case RISCV::PseudoTH_VMCLR_M_B8: case RISCV::PseudoTH_VMCLR_M_B16: case RISCV::PseudoTH_VMCLR_M_B32: case RISCV::PseudoTH_VMCLR_M_B64: - // th.vmclr.m vd => th.vmxor.mm vd, vd, vd - return expandVMSET_VMCLR(MBB, MBBI, RISCV::TH_VMXOR_MM); + // th.vmclr.m vd => th.vmxor.mm vd, vd, vd + return expandVMSET_VMCLR(MBB, MBBI, RISCV::TH_VMXOR_MM); + case RISCV::PseudoTH_VMSET_M_B1: + case RISCV::PseudoTH_VMSET_M_B2: + case RISCV::PseudoTH_VMSET_M_B4: case RISCV::PseudoTH_VMSET_M_B8: case RISCV::PseudoTH_VMSET_M_B16: case RISCV::PseudoTH_VMSET_M_B32: case RISCV::PseudoTH_VMSET_M_B64: - // th.vmset.m vd => th.vmxnor.mm vd, vd, vd - return expandVMSET_VMCLR(MBB, MBBI, RISCV::TH_VMXNOR_MM); + // th.vmset.m vd => th.vmxnor.mm vd, vd, vd + return expandVMSET_VMCLR(MBB, MBBI, RISCV::TH_VMXNOR_MM); } return false; From 608e6d76b8071887f3de4ec907befb626b6651e7 Mon Sep 17 00:00:00 2001 From: imkiva Date: Wed, 19 Jun 2024 14:20:48 +0800 Subject: [PATCH 3/6] [LLVM][XTHeadVector] update corresponding tests --- llvm/test/CodeGen/RISCV/rvv0p71/vmand.ll | 60 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmandn.ll | 76 ++++++++++++++++++++--- llvm/test/CodeGen/RISCV/rvv0p71/vmclr.ll | 48 ++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmnand.ll | 60 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmnor.ll | 60 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmor.ll | 60 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmorn.ll | 60 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmset.ll | 48 ++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmxnor.ll | 60 ++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmxor.ll | 60 ++++++++++++++++++ 10 files changed, 584 insertions(+), 8 deletions(-) diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmand.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmand.ll index adb7edd38ee95..4977a3a02c87a 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmand.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmand.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmand.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmand_mm_nxv1i1: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmand.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmand.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmand.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmand_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmand.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmand.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmand.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmand_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmand.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmand.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmand.nxv8i1( , , diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmandn.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmandn.ll index 96213959475a7..322b1f09a41de 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmandn.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmandn.ll @@ -4,13 +4,73 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmandnot.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmandnot_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmandnot.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmandnot.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmandnot_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmandnot.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmandnot.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmandnot_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmandnot.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmandnot.nxv8i1( , , iXLen); -define @intrinsic_vmandn_mm_nxv8i1( %0, %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv8i1: +define @intrinsic_vmandnot_mm_nxv8i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv8i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: th.vsetvli zero, a0, e8, m1, d1 ; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 @@ -29,8 +89,8 @@ declare @llvm.riscv.th.vmandnot.nxv16i1( , iXLen); -define @intrinsic_vmandn_mm_nxv16i1( %0, %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv16i1: +define @intrinsic_vmandnot_mm_nxv16i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv16i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: th.vsetvli zero, a0, e8, m2, d1 ; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 @@ -49,8 +109,8 @@ declare @llvm.riscv.th.vmandnot.nxv32i1( , iXLen); -define @intrinsic_vmandn_mm_nxv32i1( %0, %1, iXLen %2) nounwind { -; CHECK-LABEL: 
intrinsic_vmandn_mm_nxv32i1: +define @intrinsic_vmandnot_mm_nxv32i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv32i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: th.vsetvli zero, a0, e8, m4, d1 ; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 @@ -69,8 +129,8 @@ declare @llvm.riscv.th.vmandnot.nxv64i1( , iXLen); -define @intrinsic_vmandn_mm_nxv64i1( %0, %1, iXLen %2) nounwind { -; CHECK-LABEL: intrinsic_vmandn_mm_nxv64i1: +define @intrinsic_vmandnot_mm_nxv64i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmandnot_mm_nxv64i1: ; CHECK: # %bb.0: # %entry ; CHECK-NEXT: th.vsetvli zero, a0, e8, m8, d1 ; CHECK-NEXT: th.vmandnot.mm v0, v0, v8 diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmclr.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmclr.ll index 1ad2ef99f6b3a..c8b194d0563a6 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmclr.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmclr.ll @@ -4,6 +4,54 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmclr.nxv1i1( + iXLen); + +define @intrinsic_vmclr_m_pseudo_nxv1i1(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmclr.m v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmclr.nxv1i1( + iXLen %0) + + ret %a +} + +declare @llvm.riscv.th.vmclr.nxv2i1( + iXLen); + +define @intrinsic_vmclr_m_pseudo_nxv2i1(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmclr.m v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmclr.nxv2i1( + iXLen %0) + + ret %a +} + +declare @llvm.riscv.th.vmclr.nxv4i1( + iXLen); + +define @intrinsic_vmclr_m_pseudo_nxv4i1(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmclr_m_pseudo_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmclr.m v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmclr.nxv4i1( + iXLen %0) + + ret %a +} + declare @llvm.riscv.th.vmclr.nxv8i1( iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmnand.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmnand.ll index 7ee694048c20d..9d9eac7c21c4c 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmnand.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmnand.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmnand.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmnand_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmnand_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmnand.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmnand.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmnand.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmnand_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmnand_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmnand.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmnand.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmnand.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmnand_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmnand_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli 
zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmnand.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmnand.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmnand.nxv8i1( , , diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmnor.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmnor.ll index 7c59bf51f5696..d2713152bcc4e 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmnor.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmnor.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmnor_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmnor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmnor.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmnor.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmnor_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmnor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmnor.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmnor.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmnor_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmnor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmnor.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmnor.nxv8i1( , , diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmor.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmor.ll index 9c8d682ed391c..d38eb232aa448 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmor.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmor.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmor.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmor_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmor.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmor.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmor_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmor.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmor.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmor_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmor.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmor.nxv8i1( , , diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmorn.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmorn.ll index 93884389e0d72..3fb1f708b8893 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmorn.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv0p71/vmorn.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmornot.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmornot_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmornot_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmornot.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmornot.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmornot.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmornot_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmornot_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmornot.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmornot.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmornot.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmornot_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmornot_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmornot.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmornot.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmornot.nxv8i1( , , diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmset.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmset.ll index 27ac90264124a..841a0da0b4fe1 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmset.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmset.ll @@ -4,6 +4,54 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmset.nxv1i1( + iXLen); + +define @intrinsic_vmset_m_pseudo_nxv1i1(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmset.m v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmset.nxv1i1( + iXLen %0) + + ret %a +} + +declare @llvm.riscv.th.vmset.nxv2i1( + iXLen); + +define @intrinsic_vmset_m_pseudo_nxv2i1(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmset.m v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmset.nxv2i1( + iXLen %0) + + ret %a +} + +declare @llvm.riscv.th.vmset.nxv4i1( + iXLen); + +define @intrinsic_vmset_m_pseudo_nxv4i1(iXLen %0) nounwind { +; CHECK-LABEL: intrinsic_vmset_m_pseudo_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmset.m v0 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmset.nxv4i1( + iXLen %0) + + ret %a +} + declare @llvm.riscv.th.vmset.nxv8i1( iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmxnor.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmxnor.ll index cafc16b03e8fa..2c2994f1a1f41 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmxnor.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmxnor.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmxnor.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmxnor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmxnor_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: 
th.vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmxnor.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmxnor.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmxnor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmxnor_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmxnor.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmxnor.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmxnor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmxnor_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmxnor.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmxnor.nxv8i1( , , diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmxor.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmxor.ll index 46db0aae79fc3..d893484f25671 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmxor.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmxor.ll @@ -4,6 +4,66 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmxor.nxv1i1( + , + , + iXLen); + +define @intrinsic_vmxor_mm_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmxor_mm_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmxor.nxv1i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmxor.nxv2i1( + , + , + iXLen); + +define @intrinsic_vmxor_mm_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmxor_mm_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmxor.nxv2i1( + %0, + %1, + iXLen %2) + + ret %a +} + +declare @llvm.riscv.th.vmxor.nxv4i1( + , + , + iXLen); + +define @intrinsic_vmxor_mm_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmxor_mm_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmxor.nxv4i1( + %0, + %1, + iXLen %2) + + ret %a +} + declare @llvm.riscv.th.vmxor.nxv8i1( , , From a49f805b5c3014eb44da637b1b333a095c56fbc0 Mon Sep 17 00:00:00 2001 From: imkiva Date: Fri, 21 Jun 2024 12:56:45 +0800 Subject: [PATCH 4/6] [LLVM][XTHeadVector] update tests for `vmsof/vmsbf/vmsif` --- llvm/test/CodeGen/RISCV/rvv0p71/vmsbf.ll | 150 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmsif.ll | 150 +++++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmsof.ll | 150 +++++++++++++++++++++++ 3 files changed, 450 insertions(+) diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmsbf.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmsbf.ll index a196e2b79235f..40a5233bde678 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmsbf.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmsbf.ll @@ -4,6 +4,156 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmsbf.nxv1i1( + , + iXLen); + +define @intrinsic_vmsbf_m_nxv1i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsbf.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbf.nxv1i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsbf.mask.nxv1i1( + , + , + , + iXLen); + +define @intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsbf.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbf.mask.nxv1i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + +declare @llvm.riscv.th.vmsbf.nxv2i1( + , + iXLen); + +define @intrinsic_vmsbf_m_nxv2i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsbf.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbf.nxv2i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsbf.mask.nxv2i1( + , + , + , + iXLen); + +define @intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsbf.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbf.mask.nxv2i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + +declare @llvm.riscv.th.vmsbf.nxv4i1( + , + iXLen); + +define @intrinsic_vmsbf_m_nxv4i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmsbf.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbf.nxv4i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsbf.mask.nxv4i1( + , + , + , + iXLen); + +define @intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsbf_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmsbf.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsbf.mask.nxv4i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + declare @llvm.riscv.th.vmsbf.nxv8i1( , iXLen); diff --git 
a/llvm/test/CodeGen/RISCV/rvv0p71/vmsif.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmsif.ll index 12affc01f3c47..2a2b48a8df2ba 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmsif.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmsif.ll @@ -4,6 +4,156 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmsif.nxv1i1( + , + iXLen); + +define @intrinsic_vmsif_m_nxv1i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsif.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsif.nxv1i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsif.mask.nxv1i1( + , + , + , + iXLen); + +define @intrinsic_vmsif_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsif.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsif.mask.nxv1i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + +declare @llvm.riscv.th.vmsif.nxv2i1( + , + iXLen); + +define @intrinsic_vmsif_m_nxv2i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsif.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsif.nxv2i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsif.mask.nxv2i1( + , + , + , + iXLen); + +define @intrinsic_vmsif_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsif.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsif.mask.nxv2i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + +declare @llvm.riscv.th.vmsif.nxv4i1( + , + iXLen); + +define @intrinsic_vmsif_m_nxv4i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsif_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmsif.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsif.nxv4i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsif.mask.nxv4i1( + , + , + , + iXLen); + +define @intrinsic_vmsif_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsif_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: 
th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmsif.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsif.mask.nxv4i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + declare @llvm.riscv.th.vmsif.nxv8i1( , iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmsof.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmsof.ll index c7671707a8c9b..8d939ea9e3793 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmsof.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmsof.ll @@ -4,6 +4,156 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare @llvm.riscv.th.vmsof.nxv1i1( + , + iXLen); + +define @intrinsic_vmsof_m_nxv1i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsof.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsof.nxv1i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsof.mask.nxv1i1( + , + , + , + iXLen); + +define @intrinsic_vmsof_mask_m_nxv1i1_nxv1i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv1i1_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmsof.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsof.mask.nxv1i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + +declare @llvm.riscv.th.vmsof.nxv2i1( + , + iXLen); + +define @intrinsic_vmsof_m_nxv2i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsof.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsof.nxv2i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsof.mask.nxv2i1( + , + , + , + iXLen); + +define @intrinsic_vmsof_mask_m_nxv2i1_nxv2i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv2i1_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmsof.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsof.mask.nxv2i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + +declare @llvm.riscv.th.vmsof.nxv4i1( + , + iXLen); + +define @intrinsic_vmsof_m_nxv4i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmsof_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, 
e16, m1, d1 +; CHECK-NEXT: th.vmsof.m v8, v0 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsof.nxv4i1( + %0, + iXLen %1) + ret %a +} + +declare @llvm.riscv.th.vmsof.mask.nxv4i1( + , + , + , + iXLen); + +define @intrinsic_vmsof_mask_m_nxv4i1_nxv4i1( %0, %1, %2, iXLen %3) nounwind { +; CHECK-LABEL: intrinsic_vmsof_mask_m_nxv4i1_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v10, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v9 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmsof.m v10, v8, v0.t +; CHECK-NEXT: th.vmv.v.v v0, v10 +; CHECK-NEXT: ret +entry: + %a = call @llvm.riscv.th.vmsof.mask.nxv4i1( + %0, + %1, + %2, + iXLen %3) + ret %a +} + declare @llvm.riscv.th.vmsof.nxv8i1( , iXLen); From c806b0245788e5ae9b3bde26e8eb2d6827665604 Mon Sep 17 00:00:00 2001 From: imkiva Date: Fri, 21 Jun 2024 13:29:09 +0800 Subject: [PATCH 5/6] [LLVM][XTHeadVector] update tests for `vmfirst/vmpopc` --- llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll | 171 +++++++++++++++++++++ llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll | 171 +++++++++++++++++++++ 2 files changed, 342 insertions(+) diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll index e47b961507fd4..0fc8c38f993d3 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll @@ -4,6 +4,177 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare iXLen @llvm.riscv.th.vmfirst.iXLen.nxv1i1( + , + iXLen); + +define iXLen @intrinsic_vmfirst_m_nxv1i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmfirst_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmfirst.m a0, v0 +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmfirst.iXLen.nxv1i1( + %0, + iXLen %1) + + ret iXLen %a +} + +; define iXLen @intrinsic_vmfirst_m_nxv1i1_zero( %0) nounwind { +; ; CHECK-LABEL: intrinsic_vmfirst_m_nxv1i1_zero: +; ; CHECK: # %bb.0: # %entry +; ; CHECK-NEXT: li a0, -1 +; ; CHECK-NEXT: ret +; entry: +; %a = call iXLen @llvm.riscv.th.vmfirst.iXLen.nxv1i1( +; %0, +; iXLen 0) +; +; ret iXLen %a +; } + +declare iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv1i1( + , + , + iXLen); + +define iXLen @intrinsic_vmfirst_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfirst_mask_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v9, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmfirst.m a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv1i1( + %0, + %1, + iXLen %2) + + ret iXLen %a +} + +; define iXLen @intrinsic_vmfirst_mask_m_nxv1i1_zero( %0, %1) nounwind { +; ; CHECK-LABEL: intrinsic_vmfirst_mask_m_nxv1i1_zero: +; ; CHECK: # %bb.0: # %entry +; ; CHECK-NEXT: li a0, -1 +; ; CHECK-NEXT: ret +; entry: +; %a 
= call iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv1i1( +; %0, +; %1, +; iXLen 0) +; +; ret iXLen %a +; } + +declare iXLen @llvm.riscv.th.vmfirst.iXLen.nxv2i1( + , + iXLen); + +define iXLen @intrinsic_vmfirst_m_nxv2i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmfirst_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmfirst.m a0, v0 +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmfirst.iXLen.nxv2i1( + %0, + iXLen %1) + + ret iXLen %a +} + +declare iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv2i1( + , + , + iXLen); + +define iXLen @intrinsic_vmfirst_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfirst_mask_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v9, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmfirst.m a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv2i1( + %0, + %1, + iXLen %2) + + ret iXLen %a +} + +declare iXLen @llvm.riscv.th.vmfirst.iXLen.nxv4i1( + , + iXLen); + +define iXLen @intrinsic_vmfirst_m_nxv4i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmfirst_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmfirst.m a0, v0 +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmfirst.iXLen.nxv4i1( + %0, + iXLen %1) + + ret iXLen %a +} + +declare iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv4i1( + , + , + iXLen); + +define iXLen @intrinsic_vmfirst_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmfirst_mask_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v9, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmfirst.m a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv4i1( + %0, + %1, + iXLen %2) + + ret iXLen %a +} + declare iXLen @llvm.riscv.th.vmfirst.iXLen.nxv8i1( , iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll index cf915657b26d7..0ff1ccfa2ce4f 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll @@ -4,6 +4,177 @@ ; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+xtheadvector \ ; RUN: -verify-machineinstrs | FileCheck %s +declare iXLen @llvm.riscv.th.vmpopc.iXLen.nxv1i1( + , + iXLen); + +define iXLen @intrinsic_vmpopc_m_nxv1i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmpopc_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmpopc.m a0, v0 +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmpopc.iXLen.nxv1i1( + %0, + iXLen %1) + + ret iXLen %a +} + +; define iXLen @intrinsic_vmpopc_m_nxv1i1_zero( %0) nounwind { +; ; CHECK-LABEL: intrinsic_vmpopc_m_nxv1i1_zero: +; ; CHECK: # %bb.0: # %entry +; ; CHECK-NEXT: li 
a0, 0 +; ; CHECK-NEXT: ret +; entry: +; %a = call iXLen @llvm.riscv.th.vmpopc.iXLen.nxv1i1( +; %0, +; iXLen 0) +; +; ret iXLen %a +; } + +declare iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv1i1( + , + , + iXLen); + +define iXLen @intrinsic_vmpopc_mask_m_nxv1i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmpopc_mask_m_nxv1i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v9, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e64, m1, d1 +; CHECK-NEXT: th.vmpopc.m a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv1i1( + %0, + %1, + iXLen %2) + + ret iXLen %a +} + +; define iXLen @intrinsic_vmpopc_mask_m_nxv1i1_zero( %0, %1) nounwind { +; ; CHECK-LABEL: intrinsic_vmpopc_mask_m_nxv1i1_zero: +; ; CHECK: # %bb.0: # %entry +; ; CHECK-NEXT: li a0, 0 +; ; CHECK-NEXT: ret +; entry: +; %a = call iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv1i1( +; %0, +; %1, +; iXLen 0) +; +; ret iXLen %a +; } + +declare iXLen @llvm.riscv.th.vmpopc.iXLen.nxv2i1( + , + iXLen); + +define iXLen @intrinsic_vmpopc_m_nxv2i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmpopc_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmpopc.m a0, v0 +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmpopc.iXLen.nxv2i1( + %0, + iXLen %1) + + ret iXLen %a +} + +declare iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv2i1( + , + , + iXLen); + +define iXLen @intrinsic_vmpopc_mask_m_nxv2i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmpopc_mask_m_nxv2i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v9, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e32, m1, d1 +; CHECK-NEXT: th.vmpopc.m a0, v9, v0.t +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv2i1( + %0, + %1, + iXLen %2) + + ret iXLen %a +} + +declare iXLen @llvm.riscv.th.vmpopc.iXLen.nxv4i1( + , + iXLen); + +define iXLen @intrinsic_vmpopc_m_nxv4i1( %0, iXLen %1) nounwind { +; CHECK-LABEL: intrinsic_vmpopc_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmpopc.m a0, v0 +; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmpopc.iXLen.nxv4i1( + %0, + iXLen %1) + + ret iXLen %a +} + +declare iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv4i1( + , + , + iXLen); + +define iXLen @intrinsic_vmpopc_mask_m_nxv4i1( %0, %1, iXLen %2) nounwind { +; CHECK-LABEL: intrinsic_vmpopc_mask_m_nxv4i1: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v9, v0 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: csrr a1, vl +; CHECK-NEXT: csrr a2, vtype +; CHECK-NEXT: th.vsetvli zero, zero, e8, m1, d1 +; CHECK-NEXT: th.vmv.v.v v0, v8 +; CHECK-NEXT: th.vsetvl zero, a1, a2 +; CHECK-NEXT: th.vsetvli zero, a0, e16, m1, d1 +; CHECK-NEXT: th.vmpopc.m a0, v9, v0.t 
+; CHECK-NEXT: ret +entry: + %a = call iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv4i1( + %0, + %1, + iXLen %2) + + ret iXLen %a +} + declare iXLen @llvm.riscv.th.vmpopc.iXLen.nxv8i1( , iXLen); From 4a2deae81f112431bd20b59d0aae4b1cce0a1619 Mon Sep 17 00:00:00 2001 From: imkiva Date: Fri, 21 Jun 2024 13:29:52 +0800 Subject: [PATCH 6/6] [LLVM][XTHeadVector] update tests for `vmfirst/vmpopc` --- llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll | 27 ---------------------- llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll | 27 ---------------------- 2 files changed, 54 deletions(-) diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll index 0fc8c38f993d3..f50ae5dad3ef1 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmfirst.ll @@ -22,19 +22,6 @@ entry: ret iXLen %a } -; define iXLen @intrinsic_vmfirst_m_nxv1i1_zero( %0) nounwind { -; ; CHECK-LABEL: intrinsic_vmfirst_m_nxv1i1_zero: -; ; CHECK: # %bb.0: # %entry -; ; CHECK-NEXT: li a0, -1 -; ; CHECK-NEXT: ret -; entry: -; %a = call iXLen @llvm.riscv.th.vmfirst.iXLen.nxv1i1( -; %0, -; iXLen 0) -; -; ret iXLen %a -; } - declare iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv1i1( , , @@ -65,20 +52,6 @@ entry: ret iXLen %a } -; define iXLen @intrinsic_vmfirst_mask_m_nxv1i1_zero( %0, %1) nounwind { -; ; CHECK-LABEL: intrinsic_vmfirst_mask_m_nxv1i1_zero: -; ; CHECK: # %bb.0: # %entry -; ; CHECK-NEXT: li a0, -1 -; ; CHECK-NEXT: ret -; entry: -; %a = call iXLen @llvm.riscv.th.vmfirst.mask.iXLen.nxv1i1( -; %0, -; %1, -; iXLen 0) -; -; ret iXLen %a -; } - declare iXLen @llvm.riscv.th.vmfirst.iXLen.nxv2i1( , iXLen); diff --git a/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll b/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll index 0ff1ccfa2ce4f..51380b966c8a3 100644 --- a/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll +++ b/llvm/test/CodeGen/RISCV/rvv0p71/vmpopc.ll @@ -22,19 +22,6 @@ entry: ret iXLen %a } -; define iXLen @intrinsic_vmpopc_m_nxv1i1_zero( %0) nounwind { -; ; CHECK-LABEL: intrinsic_vmpopc_m_nxv1i1_zero: -; ; CHECK: # %bb.0: # %entry -; ; CHECK-NEXT: li a0, 0 -; ; CHECK-NEXT: ret -; entry: -; %a = call iXLen @llvm.riscv.th.vmpopc.iXLen.nxv1i1( -; %0, -; iXLen 0) -; -; ret iXLen %a -; } - declare iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv1i1( , , @@ -65,20 +52,6 @@ entry: ret iXLen %a } -; define iXLen @intrinsic_vmpopc_mask_m_nxv1i1_zero( %0, %1) nounwind { -; ; CHECK-LABEL: intrinsic_vmpopc_mask_m_nxv1i1_zero: -; ; CHECK: # %bb.0: # %entry -; ; CHECK-NEXT: li a0, 0 -; ; CHECK-NEXT: ret -; entry: -; %a = call iXLen @llvm.riscv.th.vmpopc.mask.iXLen.nxv1i1( -; %0, -; %1, -; iXLen 0) -; -; ret iXLen %a -; } - declare iXLen @llvm.riscv.th.vmpopc.iXLen.nxv2i1( , iXLen);