From db06f1c44085f6214bf2b2f4d324b9f398792780 Mon Sep 17 00:00:00 2001 From: imkiva Date: Mon, 3 Jun 2024 15:59:17 +0800 Subject: [PATCH 01/12] [Clang][XTHeadVector] make wrappers default to TAMU policy --- .../Basic/riscv_vector_xtheadv_wrappers.td | 3492 ++++++++--------- 1 file changed, 1746 insertions(+), 1746 deletions(-) diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td index 16f5e8e25af82..cb5dfdad7d76f 100644 --- a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td +++ b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td @@ -1846,73 +1846,73 @@ let HeaderCode = #define __riscv_vmseq_vx_u64m4_b16(op1, op2, vl) __riscv_th_vmseq_vx_u64m4_b16(op1, op2, vl) #define __riscv_vmseq_vx_u64m8_b8(op1, op2, vl) __riscv_th_vmseq_vx_u64m8_b8(op1, op2, vl) -#define __riscv_vmseq_vv_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmseq_vv_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u32m1_b32_m(mask, op1, 
op2, vl) -#define __riscv_vmseq_vv_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vv_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vv_u64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmseq_vx_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmseq_vx_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u32m2_b16_m(mask, op1, op2, vl) 
__riscv_th_vmseq_vx_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmseq_vx_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmseq_vx_u64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmseq_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmseq_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmseq_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmseq_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmseq_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmseq_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmsne_vv_i8m1_b8(op1, op2, vl) __riscv_th_vmsne_vv_i8m1_b8(op1, op2, vl) #define __riscv_vmsne_vv_i8m2_b4(op1, op2, vl) __riscv_th_vmsne_vv_i8m2_b4(op1, op2, vl) @@ -1982,73 +1982,73 @@ let HeaderCode = #define __riscv_vmsne_vx_u64m4_b16(op1, op2, vl) __riscv_th_vmsne_vx_u64m4_b16(op1, op2, vl) #define __riscv_vmsne_vx_u64m8_b8(op1, op2, vl) __riscv_th_vmsne_vx_u64m8_b8(op1, op2, vl) -#define __riscv_vmsne_vv_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i32m2_b16_m(mask, op1, op2, vl) -#define 
__riscv_vmsne_vv_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsne_vv_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vv_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vv_u64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsne_vx_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i32m4_b8_m(mask, 
op1, op2, vl) -#define __riscv_vmsne_vx_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsne_vx_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsne_vx_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsne_vx_u64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmsne_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmsne_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsne_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsne_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmsne_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsne_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsne_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmsne_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmsge_vv_i8m1_b8(op1, op2, vl) __riscv_th_vmsge_vv_i8m1_b8(op1, op2, vl) #define __riscv_vmsge_vv_i8m2_b4(op1, op2, vl) __riscv_th_vmsge_vv_i8m2_b4(op1, op2, vl) @@ -2084,39 +2084,39 @@ let HeaderCode = #define __riscv_vmsge_vx_i64m4_b16(op1, op2, vl) __riscv_th_vmsge_vx_i64m4_b16(op1, op2, vl) #define __riscv_vmsge_vx_i64m8_b8(op1, op2, vl) __riscv_th_vmsge_vx_i64m8_b8(op1, op2, vl) -#define __riscv_vmsge_vv_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsge_vv_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vv_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsge_vx_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i64m1_b64_m(mask, op1, op2, vl) 
__riscv_th_vmsge_vx_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsge_vx_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsge_vx_i64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmsge_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsge_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vmsge_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsge_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsge_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmsgt_vv_i8m1_b8(op1, op2, vl) __riscv_th_vmsgt_vv_i8m1_b8(op1, op2, vl) #define __riscv_vmsgt_vv_i8m2_b4(op1, op2, vl) __riscv_th_vmsgt_vv_i8m2_b4(op1, op2, vl) @@ -2152,39 +2152,39 @@ let HeaderCode = #define __riscv_vmsgt_vx_i64m4_b16(op1, op2, vl) __riscv_th_vmsgt_vx_i64m4_b16(op1, op2, vl) #define __riscv_vmsgt_vx_i64m8_b8(op1, op2, vl) __riscv_th_vmsgt_vx_i64m8_b8(op1, op2, vl) -#define __riscv_vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vv_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vv_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i8m8_b1_m(mask, op1, 
op2, vl) __riscv_th_vmsgt_vx_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmsgt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsgt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmsgt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmslt_vv_i8m1_b8(op1, op2, vl) __riscv_th_vmslt_vv_i8m1_b8(op1, op2, vl) #define __riscv_vmslt_vv_i8m2_b4(op1, op2, vl) __riscv_th_vmslt_vv_i8m2_b4(op1, op2, vl) @@ -2220,39 +2220,39 @@ let HeaderCode = #define __riscv_vmslt_vx_i64m4_b16(op1, op2, vl) __riscv_th_vmslt_vx_i64m4_b16(op1, op2, vl) #define __riscv_vmslt_vx_i64m8_b8(op1, op2, vl) __riscv_th_vmslt_vx_i64m8_b8(op1, op2, vl) -#define __riscv_vmslt_vv_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i32m2_b16_m(mask, op1, op2, vl) -#define 
__riscv_vmslt_vv_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmslt_vv_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vv_i64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmslt_vx_i8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmslt_vx_i64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmslt_vx_i64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmslt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) 
+#define __riscv_vmslt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vv_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmslt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmslt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmslt_vx_i64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmsgeu_vv_u8m1_b8(op1, op2, vl) __riscv_th_vmsgeu_vv_u8m1_b8(op1, op2, vl) #define __riscv_vmsgeu_vv_u8m2_b4(op1, op2, vl) __riscv_th_vmsgeu_vv_u8m2_b4(op1, op2, vl) @@ -2288,39 +2288,39 @@ let HeaderCode = #define __riscv_vmsgeu_vx_u64m4_b16(op1, op2, vl) __riscv_th_vmsgeu_vx_u64m4_b16(op1, op2, vl) #define __riscv_vmsgeu_vx_u64m8_b8(op1, op2, vl) __riscv_th_vmsgeu_vx_u64m8_b8(op1, op2, vl) -#define __riscv_vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl) -#define 
__riscv_vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmsgeu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vmsgeu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsgeu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmsgeu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgeu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgeu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmsgtu_vv_u8m1_b8(op1, op2, vl) __riscv_th_vmsgtu_vv_u8m1_b8(op1, op2, vl) #define __riscv_vmsgtu_vv_u8m2_b4(op1, op2, vl) __riscv_th_vmsgtu_vv_u8m2_b4(op1, op2, vl) @@ -2356,39 +2356,39 @@ let HeaderCode = #define __riscv_vmsgtu_vx_u64m4_b16(op1, op2, vl) __riscv_th_vmsgtu_vx_u64m4_b16(op1, op2, vl) #define __riscv_vmsgtu_vx_u64m8_b8(op1, op2, vl) __riscv_th_vmsgtu_vx_u64m8_b8(op1, op2, vl) -#define __riscv_vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl) -#define 
__riscv_vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmsgtu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsgtu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vmsgtu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsgtu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmsltu_vv_u8m1_b8(op1, op2, vl) __riscv_th_vmsltu_vv_u8m1_b8(op1, op2, vl) #define __riscv_vmsltu_vv_u8m2_b4(op1, op2, vl) __riscv_th_vmsltu_vv_u8m2_b4(op1, op2, vl) @@ -2424,39 +2424,39 @@ let HeaderCode = #define __riscv_vmsltu_vx_u64m4_b16(op1, op2, vl) __riscv_th_vmsltu_vx_u64m4_b16(op1, op2, vl) #define __riscv_vmsltu_vx_u64m8_b8(op1, op2, vl) __riscv_th_vmsltu_vx_u64m8_b8(op1, op2, vl) -#define __riscv_vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl) 
__riscv_th_vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl) - -#define __riscv_vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmsltu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmsltu_vv_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vv_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) + +#define __riscv_vmsltu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u8m1_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u8m2_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u8m4_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u8m8_b1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmsltu_vx_u64m8_b8_mu(mask, maskedoff, op1, op2, vl) }] in def th_integer_comparison_wrapper_macros: RVVHeader; @@ -2596,38 +2596,38 @@ let HeaderCode = #define __riscv_vsadd_vv_i64m8(op1, op2, vl) __riscv_th_vsadd_vv_i64m8(op1, op2, vl) #define __riscv_vsadd_vx_i64m8(op1, op2, vl) __riscv_th_vsadd_vx_i64m8(op1, op2, vl) -#define __riscv_vsadd_vv_i8m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i8m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i8m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i8m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i8m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i8m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i8m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i8m2_m(mask, op1, op2, vl) -#define 
__riscv_vsadd_vv_i8m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i8m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i8m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i8m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i8m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i8m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i8m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i8m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i16m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i16m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i16m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i16m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i16m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i16m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i16m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i16m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i16m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i16m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i16m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i16m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i16m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i16m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i16m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i16m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i32m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i32m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i32m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i32m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i32m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i32m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i32m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i32m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i32m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i32m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i32m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i32m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i32m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i32m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i32m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i32m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i64m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i64m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i64m1_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i64m1_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i64m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i64m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i64m2_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i64m2_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i64m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i64m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i64m4_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i64m4_m(mask, op1, op2, vl) -#define __riscv_vsadd_vv_i64m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vv_i64m8_m(mask, op1, op2, vl) -#define __riscv_vsadd_vx_i64m8_m(mask, op1, op2, vl) __riscv_th_vsadd_vx_i64m8_m(mask, op1, op2, vl) +#define __riscv_vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i8m4_mu(mask, 
maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vv_i64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsadd_vx_i64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vssub_vv_i8m1(op1, op2, vl) __riscv_th_vssub_vv_i8m1(op1, op2, vl) #define __riscv_vssub_vx_i8m1(op1, op2, vl) __riscv_th_vssub_vx_i8m1(op1, op2, vl) @@ -2662,38 +2662,38 @@ let HeaderCode = #define __riscv_vssub_vv_i64m8(op1, op2, vl) __riscv_th_vssub_vv_i64m8(op1, 
op2, vl) #define __riscv_vssub_vx_i64m8(op1, op2, vl) __riscv_th_vssub_vx_i64m8(op1, op2, vl) -#define __riscv_vssub_vv_i8m1_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i8m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i8m1_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i8m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i8m2_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i8m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i8m2_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i8m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i8m4_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i8m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i8m4_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i8m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i8m8_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i8m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i8m8_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i8m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i16m1_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i16m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i16m1_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i16m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i16m2_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i16m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i16m2_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i16m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i16m4_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i16m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i16m4_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i16m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i16m8_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i16m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i16m8_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i16m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i32m1_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i32m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i32m1_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i32m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i32m2_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i32m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i32m2_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i32m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i32m4_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i32m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i32m4_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i32m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i32m8_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i32m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i32m8_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i32m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i64m1_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i64m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i64m1_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i64m1_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i64m2_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i64m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i64m2_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i64m2_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i64m4_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i64m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i64m4_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i64m4_m(mask, op1, op2, vl) -#define __riscv_vssub_vv_i64m8_m(mask, op1, op2, vl) __riscv_th_vssub_vv_i64m8_m(mask, op1, op2, vl) -#define __riscv_vssub_vx_i64m8_m(mask, op1, op2, vl) __riscv_th_vssub_vx_i64m8_m(mask, op1, op2, vl) +#define __riscv_vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vssub_vx_i8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i64m4_mu(mask, maskedoff, op1, op2, vl) 
+#define __riscv_vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vv_i64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssub_vx_i64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vsaddu_vv_u8m1(op1, op2, vl) __riscv_th_vsaddu_vv_u8m1(op1, op2, vl) #define __riscv_vsaddu_vx_u8m1(op1, op2, vl) __riscv_th_vsaddu_vx_u8m1(op1, op2, vl) @@ -2728,38 +2728,38 @@ let HeaderCode = #define __riscv_vsaddu_vv_u64m8(op1, op2, vl) __riscv_th_vsaddu_vv_u64m8(op1, op2, vl) #define __riscv_vsaddu_vx_u64m8(op1, op2, vl) __riscv_th_vsaddu_vx_u64m8(op1, op2, vl) -#define __riscv_vsaddu_vv_u8m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u8m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u8m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u8m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u8m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u8m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u8m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u8m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u8m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u8m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u8m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u8m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u8m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u8m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u8m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u8m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u16m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u16m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u16m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u16m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u16m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u16m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u16m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u16m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u16m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u16m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u16m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u16m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u16m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u16m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u16m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u16m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u32m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u32m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u32m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u32m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u32m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u32m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u32m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u32m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u32m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u32m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u32m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u32m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u32m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u32m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u32m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u32m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u64m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u64m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u64m1_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u64m1_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u64m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u64m2_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u64m2_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u64m2_m(mask, op1, op2, vl) -#define 
__riscv_vsaddu_vv_u64m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u64m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u64m4_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u64m4_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vv_u64m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vv_u64m8_m(mask, op1, op2, vl) -#define __riscv_vsaddu_vx_u64m8_m(mask, op1, op2, vl) __riscv_th_vsaddu_vx_u64m8_m(mask, op1, op2, vl) +#define __riscv_vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u64m1_mu(mask, maskedoff, 
op1, op2, vl) +#define __riscv_vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vsaddu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vssubu_vv_u8m1(op1, op2, vl) __riscv_th_vssubu_vv_u8m1(op1, op2, vl) #define __riscv_vssubu_vx_u8m1(op1, op2, vl) __riscv_th_vssubu_vx_u8m1(op1, op2, vl) @@ -2794,38 +2794,38 @@ let HeaderCode = #define __riscv_vssubu_vv_u64m8(op1, op2, vl) __riscv_th_vssubu_vv_u64m8(op1, op2, vl) #define __riscv_vssubu_vx_u64m8(op1, op2, vl) __riscv_th_vssubu_vx_u64m8(op1, op2, vl) -#define __riscv_vssubu_vv_u8m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u8m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u8m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u8m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u8m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u8m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u8m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u8m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u8m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u8m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u8m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u8m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u8m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u8m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u8m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u8m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u16m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u16m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u16m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u16m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u16m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u16m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u16m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u16m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u16m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u16m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u16m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u16m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u16m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u16m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u16m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u16m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u32m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u32m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u32m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u32m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u32m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u32m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u32m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u32m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u32m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u32m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u32m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u32m4_m(mask, op1, 
op2, vl) -#define __riscv_vssubu_vv_u32m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u32m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u32m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u32m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u64m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u64m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u64m1_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u64m1_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u64m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u64m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u64m2_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u64m2_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u64m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u64m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u64m4_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u64m4_m(mask, op1, op2, vl) -#define __riscv_vssubu_vv_u64m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vv_u64m8_m(mask, op1, op2, vl) -#define __riscv_vssubu_vx_u64m8_m(mask, op1, op2, vl) __riscv_th_vssubu_vx_u64m8_m(mask, op1, op2, vl) +#define __riscv_vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u8m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u8m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u8m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u8m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u32m2_mu(mask, 
maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vv_u64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vssubu_vx_u64m8_mu(mask, maskedoff, op1, op2, vl) }] in def th_single_width_saturating_add_wrapper_macros: RVVHeader; @@ -2868,38 +2868,38 @@ let HeaderCode = #define __riscv_vaadd_vv_i64m8(op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m8(op1, op2, rm, vl) #define __riscv_vaadd_vx_i64m8(op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m8(op1, op2, rm, vl) -#define __riscv_vaadd_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i16m8_m(mask, op1, op2, rm, vl) 
__riscv_th_vaadd_vx_i16m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m1_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m2_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m4_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m8_m(mask, op1, op2, rm, vl) -#define __riscv_vaadd_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m8_m(mask, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i8m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i8m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define 
__riscv_vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i16m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i16m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i32m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i32m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vv_i64m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vaadd_vx_i64m8_mu(mask, maskedoff, op1, op2, rm, vl) #define __riscv_vasub_vv_i8m1(op1, op2, rm, vl) __riscv_th_vasub_vv_i8m1(op1, op2, rm, vl) #define __riscv_vasub_vx_i8m1(op1, op2, rm, vl) __riscv_th_vasub_vx_i8m1(op1, op2, rm, vl) @@ -2934,38 +2934,38 @@ let HeaderCode = #define __riscv_vasub_vv_i64m8(op1, op2, rm, vl) __riscv_th_vasub_vv_i64m8(op1, op2, rm, vl) #define __riscv_vasub_vx_i64m8(op1, op2, rm, vl) __riscv_th_vasub_vx_i64m8(op1, op2, rm, vl) -#define __riscv_vasub_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i8m4_m(mask, op1, op2, rm, vl) 
__riscv_th_vasub_vx_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m1_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m2_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m4_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m8_m(mask, op1, op2, rm, vl) -#define __riscv_vasub_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m8_m(mask, op1, op2, rm, vl) +#define __riscv_vasub_vv_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m4_mu(mask, maskedoff, op1, op2, 
rm, vl) +#define __riscv_vasub_vx_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i8m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i8m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i8m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i8m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i16m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i16m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i16m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i16m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i32m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i32m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i32m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i32m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i64m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i64m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i64m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i64m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i64m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i64m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vx_i64m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vv_i64m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vasub_vv_i64m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vasub_vx_i64m8_m(mask, maskedoff, op1, op2, rm, vl) 
__riscv_th_vasub_vx_i64m8_mu(mask, maskedoff, op1, op2, rm, vl) }] in def th_single_width_averaging_add_and_subtract_wrapper_macros: RVVHeader; @@ -3008,38 +3008,38 @@ let HeaderCode = #define __riscv_vsmul_vv_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8(op1, op2, rm, vl) #define __riscv_vsmul_vx_i64m8(op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8(op1, op2, rm, vl) -#define __riscv_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4_m(mask, op1, op2, rm, vl) -#define 
__riscv_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8_m(mask, op1, op2, rm, vl) -#define __riscv_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8_m(mask, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i8m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i8m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i16m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i16m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i32m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i32m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i64m1_m(mask, maskedoff, op1, 
op2, rm, vl) __riscv_th_vsmul_vv_i64m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vv_i64m8_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vsmul_vx_i64m8_mu(mask, maskedoff, op1, op2, rm, vl) }] in def th_single_width_fractional_multiply_with_rounding_and_saturation_wrapper_macros: RVVHeader; @@ -3068,24 +3068,24 @@ let HeaderCode = #define __riscv_vnclip_wv_i32m4(op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m4(op1, op2, rm, vl) #define __riscv_vnclip_wx_i32m4(op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m4(op1, op2, rm, vl) -#define __riscv_vnclip_wv_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i8m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i8m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i8m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i16m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i16m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i16m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i32m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i32m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wv_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclip_wx_i32m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m4_m(mask, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i8m1_m(mask, maskedoff, op1, op2, rm, vl) 
__riscv_th_vnclip_wx_i8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wv_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wv_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclip_wx_i32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclip_wx_i32m4_mu(mask, maskedoff, op1, op2, rm, vl) #define __riscv_vnclipu_wv_u8m1(op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m1(op1, op2, rm, vl) #define __riscv_vnclipu_wx_u8m1(op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m1(op1, op2, rm, vl) @@ -3106,24 +3106,24 @@ let HeaderCode = #define __riscv_vnclipu_wv_u32m4(op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m4(op1, op2, rm, vl) #define __riscv_vnclipu_wx_u32m4(op1, op2, rm, vl) __riscv_th_vnclipu_wx_u32m4(op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u8m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u8m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u8m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u8m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u8m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u8m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u16m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u16m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u16m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u16m1_m(mask, op1, op2, rm, vl) -#define 
__riscv_vnclipu_wv_u16m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u16m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u16m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u16m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u16m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u16m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u32m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u32m1_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u32m1_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u32m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u32m2_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u32m2_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wv_u32m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m4_m(mask, op1, op2, rm, vl) -#define __riscv_vnclipu_wx_u32m4_m(mask, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u32m4_m(mask, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u8m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u8m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u8m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u8m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u16m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u16m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u16m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u16m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u16m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u16m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u32m1_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u32m1_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u32m2_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wx_u32m2_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wv_u32m4_m(mask, maskedoff, op1, op2, rm, vl) __riscv_th_vnclipu_wv_u32m4_mu(mask, maskedoff, op1, op2, rm, vl) +#define __riscv_vnclipu_wx_u32m4_m(mask, maskedoff, op1, op2, rm, vl) 
__riscv_th_vnclipu_wx_u32m4_mu(mask, maskedoff, op1, op2, rm, vl) }] in def th_narrowing_width_fixed_point_clip_wrapper_macros: RVVHeader; @@ -3157,30 +3157,30 @@ let HeaderCode = #define __riscv_vfadd_vf_f64m4(op1, op2, vl) __riscv_th_vfadd_vf_f64m4(op1, op2, vl) #define __riscv_vfadd_vv_f64m8(op1, op2, vl) __riscv_th_vfadd_vv_f64m8(op1, op2, vl) #define __riscv_vfadd_vf_f64m8(op1, op2, vl) __riscv_th_vfadd_vf_f64m8(op1, op2, vl) -#define __riscv_vfadd_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfadd_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfadd_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfadd_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f16m4_m(mask, 
maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfdiv_vv_f16m1(op1, op2, vl) __riscv_th_vfdiv_vv_f16m1(op1, op2, vl) #define __riscv_vfdiv_vf_f16m1(op1, op2, vl) __riscv_th_vfdiv_vf_f16m1(op1, op2, vl) #define __riscv_vfdiv_vv_f16m2(op1, op2, vl) __riscv_th_vfdiv_vv_f16m2(op1, op2, vl) @@ -3205,30 +3205,30 @@ let HeaderCode = #define __riscv_vfdiv_vf_f64m4(op1, op2, vl) __riscv_th_vfdiv_vf_f64m4(op1, op2, vl) #define __riscv_vfdiv_vv_f64m8(op1, op2, vl) __riscv_th_vfdiv_vv_f64m8(op1, op2, vl) #define __riscv_vfdiv_vf_f64m8(op1, op2, vl) __riscv_th_vfdiv_vf_f64m8(op1, op2, vl) -#define __riscv_vfdiv_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f16m8_m(mask, op1, op2, vl) 
__riscv_th_vfdiv_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfdiv_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfdiv_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfdiv_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfmul_vv_f16m1(op1, op2, vl) __riscv_th_vfmul_vv_f16m1(op1, op2, vl) #define __riscv_vfmul_vf_f16m1(op1, op2, vl) __riscv_th_vfmul_vf_f16m1(op1, op2, vl) #define __riscv_vfmul_vv_f16m2(op1, op2, vl) __riscv_th_vfmul_vv_f16m2(op1, op2, vl) @@ -3253,30 +3253,30 @@ let HeaderCode = #define __riscv_vfmul_vf_f64m4(op1, op2, vl) __riscv_th_vfmul_vf_f64m4(op1, op2, vl) #define __riscv_vfmul_vv_f64m8(op1, op2, vl) __riscv_th_vfmul_vv_f64m8(op1, op2, vl) #define __riscv_vfmul_vf_f64m8(op1, op2, vl) __riscv_th_vfmul_vf_f64m8(op1, op2, vl) -#define __riscv_vfmul_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f64m1_m(mask, op1, op2, vl) -#define 
__riscv_vfmul_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfmul_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfmul_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfmul_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfmul_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vv_f64m8_mu(mask, 
maskedoff, op1, op2, vl) +#define __riscv_vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfrdiv_vf_f16m1(op1, op2, vl) __riscv_th_vfrdiv_vf_f16m1(op1, op2, vl) #define __riscv_vfrdiv_vf_f16m2(op1, op2, vl) __riscv_th_vfrdiv_vf_f16m2(op1, op2, vl) #define __riscv_vfrdiv_vf_f16m4(op1, op2, vl) __riscv_th_vfrdiv_vf_f16m4(op1, op2, vl) @@ -3289,18 +3289,18 @@ let HeaderCode = #define __riscv_vfrdiv_vf_f64m2(op1, op2, vl) __riscv_th_vfrdiv_vf_f64m2(op1, op2, vl) #define __riscv_vfrdiv_vf_f64m4(op1, op2, vl) __riscv_th_vfrdiv_vf_f64m4(op1, op2, vl) #define __riscv_vfrdiv_vf_f64m8(op1, op2, vl) __riscv_th_vfrdiv_vf_f64m8(op1, op2, vl) -#define __riscv_vfrdiv_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfrdiv_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrdiv_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfrsub_vf_f16m1(op1, op2, vl) 
__riscv_th_vfrsub_vf_f16m1(op1, op2, vl) #define __riscv_vfrsub_vf_f16m2(op1, op2, vl) __riscv_th_vfrsub_vf_f16m2(op1, op2, vl) #define __riscv_vfrsub_vf_f16m4(op1, op2, vl) __riscv_th_vfrsub_vf_f16m4(op1, op2, vl) @@ -3313,18 +3313,18 @@ let HeaderCode = #define __riscv_vfrsub_vf_f64m2(op1, op2, vl) __riscv_th_vfrsub_vf_f64m2(op1, op2, vl) #define __riscv_vfrsub_vf_f64m4(op1, op2, vl) __riscv_th_vfrsub_vf_f64m4(op1, op2, vl) #define __riscv_vfrsub_vf_f64m8(op1, op2, vl) __riscv_th_vfrsub_vf_f64m8(op1, op2, vl) -#define __riscv_vfrsub_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfrsub_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfrsub_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfrsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfsub_vv_f16m1(op1, op2, vl) __riscv_th_vfsub_vv_f16m1(op1, op2, vl) #define __riscv_vfsub_vf_f16m1(op1, op2, vl) __riscv_th_vfsub_vf_f16m1(op1, op2, vl) #define __riscv_vfsub_vv_f16m2(op1, op2, vl) __riscv_th_vfsub_vv_f16m2(op1, op2, vl) @@ 
-3349,30 +3349,30 @@ let HeaderCode = #define __riscv_vfsub_vf_f64m4(op1, op2, vl) __riscv_th_vfsub_vf_f64m4(op1, op2, vl) #define __riscv_vfsub_vv_f64m8(op1, op2, vl) __riscv_th_vfsub_vv_f64m8(op1, op2, vl) #define __riscv_vfsub_vf_f64m8(op1, op2, vl) __riscv_th_vfsub_vf_f64m8(op1, op2, vl) -#define __riscv_vfsub_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsub_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsub_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfsub_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsub_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, 
vl) __riscv_th_vfsub_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfwadd_wf_f32m1(op1, op2, vl) __riscv_th_vfwadd_wf_f32m1(op1, op2, vl) #define __riscv_vfwadd_vv_f32m2(op1, op2, vl) __riscv_th_vfwadd_vv_f32m2(op1, op2, vl) #define __riscv_vfwadd_vf_f32m2(op1, op2, vl) __riscv_th_vfwadd_vf_f32m2(op1, op2, vl) @@ -3399,32 +3399,32 @@ let HeaderCode = #define __riscv_vfwadd_vf_f64m8(op1, op2, vl) __riscv_th_vfwadd_vf_f64m8(op1, op2, vl) #define __riscv_vfwadd_wv_f64m8(op1, op2, vl) __riscv_th_vfwadd_wv_f64m8(op1, op2, vl) #define __riscv_vfwadd_wf_f64m8(op1, op2, vl) __riscv_th_vfwadd_wf_f64m8(op1, op2, vl) -#define __riscv_vfwadd_wf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_wv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wv_f32m4_m(mask, op1, op2, vl) 
__riscv_th_vfwadd_wv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_wv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_wv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_wv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_vf_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_wv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwadd_wf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwadd_wf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f32m8_mu(mask, 
maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwadd_wf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfwsub_wf_f32m1(op1, op2, vl) __riscv_th_vfwsub_wf_f32m1(op1, op2, vl) #define __riscv_vfwsub_vv_f32m2(op1, op2, vl) __riscv_th_vfwsub_vv_f32m2(op1, op2, vl) #define __riscv_vfwsub_vf_f32m2(op1, op2, vl) __riscv_th_vfwsub_vf_f32m2(op1, op2, vl) @@ -3451,32 +3451,32 @@ let HeaderCode = #define __riscv_vfwsub_vf_f64m8(op1, op2, vl) __riscv_th_vfwsub_vf_f64m8(op1, op2, vl) #define __riscv_vfwsub_wv_f64m8(op1, op2, vl) __riscv_th_vfwsub_wv_f64m8(op1, op2, vl) #define __riscv_vfwsub_wf_f64m8(op1, op2, vl) __riscv_th_vfwsub_wf_f64m8(op1, op2, vl) -#define __riscv_vfwsub_wf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_wv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_wv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_wv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f32m8_m(mask, op1, op2, vl) 
__riscv_th_vfwsub_wf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_wv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_wv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_vf_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_wv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwsub_wf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwsub_wf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wv_f64m2_mu(mask, maskedoff, op1, 
op2, vl) +#define __riscv_vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwsub_wf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfwmul_vv_f32m2(op1, op2, vl) __riscv_th_vfwmul_vv_f32m2(op1, op2, vl) #define __riscv_vfwmul_vf_f32m2(op1, op2, vl) __riscv_th_vfwmul_vf_f32m2(op1, op2, vl) #define __riscv_vfwmul_vv_f32m4(op1, op2, vl) __riscv_th_vfwmul_vv_f32m4(op1, op2, vl) @@ -3489,18 +3489,18 @@ let HeaderCode = #define __riscv_vfwmul_vf_f64m4(op1, op2, vl) __riscv_th_vfwmul_vf_f64m4(op1, op2, vl) #define __riscv_vfwmul_vv_f64m8(op1, op2, vl) __riscv_th_vfwmul_vv_f64m8(op1, op2, vl) #define __riscv_vfwmul_vf_f64m8(op1, op2, vl) __riscv_th_vfwmul_vf_f64m8(op1, op2, vl) -#define __riscv_vfwmul_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwmul_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfwmul_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwmul_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfwmul_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwmul_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfwmul_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwmul_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfwmul_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwmul_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfwmul_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwmul_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfwmul_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfwmul_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vfwmul_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfwmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfwmul_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfmacc_vv_f16m1(vd, vs1, vs2, vl) __riscv_th_vfmacc_vv_f16m1(vd, vs1, vs2, vl) #define __riscv_vfmacc_vf_f16m1(vd, rs1, vs2, vl) __riscv_th_vfmacc_vf_f16m1(vd, rs1, vs2, vl) @@ -3995,18 +3995,18 @@ let HeaderCode = #define __riscv_vfabs_v_f64m2(op1, vl) __riscv_th_vfabs_v_f64m2(op1, vl) #define __riscv_vfabs_v_f64m4(op1, vl) __riscv_th_vfabs_v_f64m4(op1, vl) #define __riscv_vfabs_v_f64m8(op1, vl) __riscv_th_vfabs_v_f64m8(op1, vl) -#define __riscv_vfabs_v_f16m1_m(mask, op1, vl) __riscv_th_vfabs_v_f16m1_m(mask, op1, vl) -#define __riscv_vfabs_v_f16m2_m(mask, op1, vl) __riscv_th_vfabs_v_f16m2_m(mask, op1, vl) -#define __riscv_vfabs_v_f16m4_m(mask, op1, vl) __riscv_th_vfabs_v_f16m4_m(mask, op1, vl) -#define __riscv_vfabs_v_f16m8_m(mask, op1, vl) __riscv_th_vfabs_v_f16m8_m(mask, op1, vl) -#define __riscv_vfabs_v_f32m1_m(mask, op1, vl) __riscv_th_vfabs_v_f32m1_m(mask, op1, vl) -#define __riscv_vfabs_v_f32m2_m(mask, op1, vl) __riscv_th_vfabs_v_f32m2_m(mask, op1, vl) -#define __riscv_vfabs_v_f32m4_m(mask, op1, vl) __riscv_th_vfabs_v_f32m4_m(mask, op1, vl) -#define __riscv_vfabs_v_f32m8_m(mask, op1, vl) __riscv_th_vfabs_v_f32m8_m(mask, op1, vl) -#define __riscv_vfabs_v_f64m1_m(mask, op1, vl) __riscv_th_vfabs_v_f64m1_m(mask, op1, vl) -#define __riscv_vfabs_v_f64m2_m(mask, op1, vl) __riscv_th_vfabs_v_f64m2_m(mask, op1, vl) -#define __riscv_vfabs_v_f64m4_m(mask, op1, vl) __riscv_th_vfabs_v_f64m4_m(mask, op1, vl) -#define __riscv_vfabs_v_f64m8_m(mask, op1, vl) __riscv_th_vfabs_v_f64m8_m(mask, op1, vl) +#define __riscv_vfabs_v_f16m1_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f16m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f16m2_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f16m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f16m4_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f16m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f16m8_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f16m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f32m1_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f32m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f32m2_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f32m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f32m4_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f32m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f32m8_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f32m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f64m1_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f64m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f64m2_m(mask, maskedoff, op1, 
vl) __riscv_th_vfabs_v_f64m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f64m4_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f64m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfabs_v_f64m8_m(mask, maskedoff, op1, vl) __riscv_th_vfabs_v_f64m8_mu(mask, maskedoff, op1, vl) #define __riscv_vfmax_vv_f16m1(op1, op2, vl) __riscv_th_vfmax_vv_f16m1(op1, op2, vl) #define __riscv_vfmax_vf_f16m1(op1, op2, vl) __riscv_th_vfmax_vf_f16m1(op1, op2, vl) #define __riscv_vfmax_vv_f16m2(op1, op2, vl) __riscv_th_vfmax_vv_f16m2(op1, op2, vl) @@ -4031,30 +4031,30 @@ let HeaderCode = #define __riscv_vfmax_vf_f64m4(op1, op2, vl) __riscv_th_vfmax_vf_f64m4(op1, op2, vl) #define __riscv_vfmax_vv_f64m8(op1, op2, vl) __riscv_th_vfmax_vv_f64m8(op1, op2, vl) #define __riscv_vfmax_vf_f64m8(op1, op2, vl) __riscv_th_vfmax_vf_f64m8(op1, op2, vl) -#define __riscv_vfmax_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfmax_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfmax_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfmax_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfmax_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfmax_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmax_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfmin_vv_f16m1(op1, op2, vl) __riscv_th_vfmin_vv_f16m1(op1, op2, vl) #define __riscv_vfmin_vf_f16m1(op1, op2, vl) __riscv_th_vfmin_vf_f16m1(op1, op2, vl) #define __riscv_vfmin_vv_f16m2(op1, op2, vl) __riscv_th_vfmin_vv_f16m2(op1, op2, vl) @@ -4079,30 +4079,30 @@ let HeaderCode = #define __riscv_vfmin_vf_f64m4(op1, op2, vl) __riscv_th_vfmin_vf_f64m4(op1, op2, vl) #define __riscv_vfmin_vv_f64m8(op1, op2, vl) __riscv_th_vfmin_vv_f64m8(op1, op2, vl) #define __riscv_vfmin_vf_f64m8(op1, op2, vl) __riscv_th_vfmin_vf_f64m8(op1, op2, vl) -#define __riscv_vfmin_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f16m2_m(mask, op1, op2, vl) 
__riscv_th_vfmin_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfmin_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfmin_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfmin_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfmin_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f32m2_mu(mask, maskedoff, 
op1, op2, vl) +#define __riscv_vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfmin_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfsgnj_vv_f16m1(op1, op2, vl) __riscv_th_vfsgnj_vv_f16m1(op1, op2, vl) #define __riscv_vfsgnj_vf_f16m1(op1, op2, vl) __riscv_th_vfsgnj_vf_f16m1(op1, op2, vl) #define __riscv_vfsgnj_vv_f16m2(op1, op2, vl) __riscv_th_vfsgnj_vv_f16m2(op1, op2, vl) @@ -4127,30 +4127,30 @@ let HeaderCode = #define __riscv_vfsgnj_vf_f64m4(op1, op2, vl) __riscv_th_vfsgnj_vf_f64m4(op1, op2, vl) #define __riscv_vfsgnj_vv_f64m8(op1, op2, vl) __riscv_th_vfsgnj_vv_f64m8(op1, op2, vl) #define __riscv_vfsgnj_vf_f64m8(op1, op2, vl) __riscv_th_vfsgnj_vf_f64m8(op1, op2, vl) -#define __riscv_vfsgnj_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m4_m(mask, op1, op2, vl) 
-#define __riscv_vfsgnj_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnj_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m2_mu(mask, maskedoff, op1, op2, 
vl) +#define __riscv_vfsgnj_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnj_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnj_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfsgnjn_vv_f16m1(op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m1(op1, op2, vl) #define __riscv_vfsgnjn_vf_f16m1(op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m1(op1, op2, vl) #define __riscv_vfsgnjn_vv_f16m2(op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m2(op1, op2, vl) @@ -4175,30 +4175,30 @@ let HeaderCode = #define __riscv_vfsgnjn_vf_f64m4(op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m4(op1, op2, vl) #define __riscv_vfsgnjn_vv_f64m8(op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m8(op1, op2, vl) #define __riscv_vfsgnjn_vf_f64m8(op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m8(op1, op2, vl) -#define __riscv_vfsgnjn_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m4_m(mask, op1, op2, vl) -#define 
__riscv_vfsgnjn_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjn_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjn_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjn_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfsgnjx_vv_f16m1(op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m1(op1, op2, vl) #define __riscv_vfsgnjx_vf_f16m1(op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m1(op1, op2, vl) #define __riscv_vfsgnjx_vv_f16m2(op1, op2, vl) 
__riscv_th_vfsgnjx_vv_f16m2(op1, op2, vl) @@ -4223,30 +4223,30 @@ let HeaderCode = #define __riscv_vfsgnjx_vf_f64m4(op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m4(op1, op2, vl) #define __riscv_vfsgnjx_vv_f64m8(op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m8(op1, op2, vl) #define __riscv_vfsgnjx_vf_f64m8(op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m8(op1, op2, vl) -#define __riscv_vfsgnjx_vv_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f16m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f16m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f16m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f16m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f32m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f32m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f32m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f32m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f64m1_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m1_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f64m2_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m2_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f64m4_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m4_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vv_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m8_m(mask, op1, op2, vl) -#define __riscv_vfsgnjx_vf_f64m8_m(mask, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m8_m(mask, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f16m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f32m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f32m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f32m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f32m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f32m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f64m1_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m1_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f64m2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f64m4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vv_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vv_f64m8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vfsgnjx_vf_f64m8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vfsgnjx_vf_f64m8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfneg_v_f16m1(op1, vl) __riscv_th_vfneg_v_f16m1(op1, vl) #define __riscv_vfneg_v_f16m2(op1, vl) __riscv_th_vfneg_v_f16m2(op1, vl) #define __riscv_vfneg_v_f16m4(op1, vl) __riscv_th_vfneg_v_f16m4(op1, vl) @@ -4259,18 +4259,18 @@ let HeaderCode = #define __riscv_vfneg_v_f64m2(op1, vl) __riscv_th_vfneg_v_f64m2(op1, vl) #define __riscv_vfneg_v_f64m4(op1, vl) __riscv_th_vfneg_v_f64m4(op1, vl) #define __riscv_vfneg_v_f64m8(op1, vl) __riscv_th_vfneg_v_f64m8(op1, vl) -#define __riscv_vfneg_v_f16m1_m(mask, op1, vl) __riscv_th_vfneg_v_f16m1_m(mask, op1, vl) -#define __riscv_vfneg_v_f16m2_m(mask, op1, vl) __riscv_th_vfneg_v_f16m2_m(mask, op1, vl) -#define __riscv_vfneg_v_f16m4_m(mask, op1, vl) __riscv_th_vfneg_v_f16m4_m(mask, op1, vl) -#define __riscv_vfneg_v_f16m8_m(mask, op1, vl) __riscv_th_vfneg_v_f16m8_m(mask, op1, vl) -#define __riscv_vfneg_v_f32m1_m(mask, op1, vl) __riscv_th_vfneg_v_f32m1_m(mask, op1, vl) -#define __riscv_vfneg_v_f32m2_m(mask, op1, vl) __riscv_th_vfneg_v_f32m2_m(mask, op1, vl) -#define __riscv_vfneg_v_f32m4_m(mask, op1, vl) __riscv_th_vfneg_v_f32m4_m(mask, op1, vl) 
-#define __riscv_vfneg_v_f32m8_m(mask, op1, vl) __riscv_th_vfneg_v_f32m8_m(mask, op1, vl) -#define __riscv_vfneg_v_f64m1_m(mask, op1, vl) __riscv_th_vfneg_v_f64m1_m(mask, op1, vl) -#define __riscv_vfneg_v_f64m2_m(mask, op1, vl) __riscv_th_vfneg_v_f64m2_m(mask, op1, vl) -#define __riscv_vfneg_v_f64m4_m(mask, op1, vl) __riscv_th_vfneg_v_f64m4_m(mask, op1, vl) -#define __riscv_vfneg_v_f64m8_m(mask, op1, vl) __riscv_th_vfneg_v_f64m8_m(mask, op1, vl) +#define __riscv_vfneg_v_f16m1_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f16m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f16m2_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f16m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f16m4_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f16m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f16m8_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f16m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f32m1_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f32m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f32m2_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f32m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f32m4_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f32m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f32m8_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f32m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f64m1_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f64m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f64m2_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f64m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f64m4_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f64m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfneg_v_f64m8_m(mask, maskedoff, op1, vl) __riscv_th_vfneg_v_f64m8_mu(mask, maskedoff, op1, vl) #define __riscv_vfsqrt_v_f16m1(op1, vl) __riscv_th_vfsqrt_v_f16m1(op1, vl) #define __riscv_vfsqrt_v_f16m2(op1, vl) __riscv_th_vfsqrt_v_f16m2(op1, vl) #define __riscv_vfsqrt_v_f16m4(op1, vl) __riscv_th_vfsqrt_v_f16m4(op1, vl) @@ -4283,18 +4283,18 @@ let HeaderCode = #define __riscv_vfsqrt_v_f64m2(op1, vl) __riscv_th_vfsqrt_v_f64m2(op1, vl) #define __riscv_vfsqrt_v_f64m4(op1, vl) __riscv_th_vfsqrt_v_f64m4(op1, vl) #define __riscv_vfsqrt_v_f64m8(op1, vl) __riscv_th_vfsqrt_v_f64m8(op1, vl) -#define __riscv_vfsqrt_v_f16m1_m(mask, op1, vl) __riscv_th_vfsqrt_v_f16m1_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f16m2_m(mask, op1, vl) __riscv_th_vfsqrt_v_f16m2_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f16m4_m(mask, op1, vl) __riscv_th_vfsqrt_v_f16m4_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f16m8_m(mask, op1, vl) __riscv_th_vfsqrt_v_f16m8_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f32m1_m(mask, op1, vl) __riscv_th_vfsqrt_v_f32m1_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f32m2_m(mask, op1, vl) __riscv_th_vfsqrt_v_f32m2_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f32m4_m(mask, op1, vl) __riscv_th_vfsqrt_v_f32m4_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f32m8_m(mask, op1, vl) __riscv_th_vfsqrt_v_f32m8_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f64m1_m(mask, op1, vl) __riscv_th_vfsqrt_v_f64m1_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f64m2_m(mask, op1, vl) __riscv_th_vfsqrt_v_f64m2_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f64m4_m(mask, op1, vl) __riscv_th_vfsqrt_v_f64m4_m(mask, op1, vl) -#define __riscv_vfsqrt_v_f64m8_m(mask, op1, vl) __riscv_th_vfsqrt_v_f64m8_m(mask, op1, vl) +#define __riscv_vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f16m1_mu(mask, maskedoff, op1, vl) +#define 
__riscv_vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f16m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f16m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f16m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f32m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f32m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f32m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f32m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f64m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f64m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f64m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl) __riscv_th_vfsqrt_v_f64m8_mu(mask, maskedoff, op1, vl) #define __riscv_vfclass_v_u16m1(op1, vl) __riscv_th_vfclass_v_u16m1(op1, vl) #define __riscv_vfclass_v_u16m2(op1, vl) __riscv_th_vfclass_v_u16m2(op1, vl) #define __riscv_vfclass_v_u16m4(op1, vl) __riscv_th_vfclass_v_u16m4(op1, vl) @@ -4307,18 +4307,18 @@ let HeaderCode = #define __riscv_vfclass_v_u64m2(op1, vl) __riscv_th_vfclass_v_u64m2(op1, vl) #define __riscv_vfclass_v_u64m4(op1, vl) __riscv_th_vfclass_v_u64m4(op1, vl) #define __riscv_vfclass_v_u64m8(op1, vl) __riscv_th_vfclass_v_u64m8(op1, vl) -#define __riscv_vfclass_v_u16m1_m(mask, op1, vl) __riscv_th_vfclass_v_u16m1_m(mask, op1, vl) -#define __riscv_vfclass_v_u16m2_m(mask, op1, vl) __riscv_th_vfclass_v_u16m2_m(mask, op1, vl) -#define __riscv_vfclass_v_u16m4_m(mask, op1, vl) __riscv_th_vfclass_v_u16m4_m(mask, op1, vl) -#define __riscv_vfclass_v_u16m8_m(mask, op1, vl) __riscv_th_vfclass_v_u16m8_m(mask, op1, vl) -#define __riscv_vfclass_v_u32m1_m(mask, op1, vl) __riscv_th_vfclass_v_u32m1_m(mask, op1, vl) -#define __riscv_vfclass_v_u32m2_m(mask, op1, vl) __riscv_th_vfclass_v_u32m2_m(mask, op1, vl) -#define __riscv_vfclass_v_u32m4_m(mask, op1, vl) __riscv_th_vfclass_v_u32m4_m(mask, op1, vl) -#define __riscv_vfclass_v_u32m8_m(mask, op1, vl) __riscv_th_vfclass_v_u32m8_m(mask, op1, vl) -#define __riscv_vfclass_v_u64m1_m(mask, op1, vl) __riscv_th_vfclass_v_u64m1_m(mask, op1, vl) -#define __riscv_vfclass_v_u64m2_m(mask, op1, vl) __riscv_th_vfclass_v_u64m2_m(mask, op1, vl) -#define __riscv_vfclass_v_u64m4_m(mask, op1, vl) __riscv_th_vfclass_v_u64m4_m(mask, op1, vl) -#define __riscv_vfclass_v_u64m8_m(mask, op1, vl) __riscv_th_vfclass_v_u64m8_m(mask, op1, vl) +#define __riscv_vfclass_v_u16m1_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u16m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u16m2_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u16m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u16m4_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u16m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u16m8_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u16m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u32m1_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u32m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u32m2_m(mask, maskedoff, op1, vl) 
__riscv_th_vfclass_v_u32m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u32m4_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u32m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u32m8_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u32m8_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u64m1_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u64m1_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u64m2_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u64m2_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u64m4_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u64m4_mu(mask, maskedoff, op1, vl) +#define __riscv_vfclass_v_u64m8_m(mask, maskedoff, op1, vl) __riscv_th_vfclass_v_u64m8_mu(mask, maskedoff, op1, vl) #define __riscv_vfmerge_vfm_f16m1(op1, op2, mask, vl) __riscv_th_vfmerge_vfm_f16m1(op1, op2, mask, vl) #define __riscv_vfmerge_vfm_f16m2(op1, op2, mask, vl) __riscv_th_vfmerge_vfm_f16m2(op1, op2, mask, vl) #define __riscv_vfmerge_vfm_f16m4(op1, op2, mask, vl) __riscv_th_vfmerge_vfm_f16m4(op1, op2, mask, vl) @@ -4367,30 +4367,30 @@ let HeaderCode = #define __riscv_vmfeq_vf_f64m4_b16(op1, op2, vl) __riscv_th_vmfeq_vf_f64m4_b16(op1, op2, vl) #define __riscv_vmfeq_vv_f64m8_b8(op1, op2, vl) __riscv_th_vmfeq_vv_f64m8_b8(op1, op2, vl) #define __riscv_vmfeq_vf_f64m8_b8(op1, op2, vl) __riscv_th_vmfeq_vf_f64m8_b8(op1, op2, vl) -#define __riscv_vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f64m2_b32_m(mask, op1, 
op2, vl) -#define __riscv_vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl) -#define __riscv_vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmfeq_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfeq_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmfeq_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmfne_vv_f16m1_b16(op1, op2, vl) __riscv_th_vmfne_vv_f16m1_b16(op1, op2, vl) #define __riscv_vmfne_vf_f16m1_b16(op1, op2, vl) __riscv_th_vmfne_vf_f16m1_b16(op1, op2, vl) #define __riscv_vmfne_vv_f16m2_b8(op1, op2, vl) __riscv_th_vmfne_vv_f16m2_b8(op1, op2, vl) @@ -4415,30 +4415,30 @@ let HeaderCode = #define __riscv_vmfne_vf_f64m4_b16(op1, op2, vl) __riscv_th_vmfne_vf_f64m4_b16(op1, op2, vl) #define __riscv_vmfne_vv_f64m8_b8(op1, op2, vl) __riscv_th_vmfne_vv_f64m8_b8(op1, op2, vl) #define __riscv_vmfne_vf_f64m8_b8(op1, op2, vl) __riscv_th_vmfne_vf_f64m8_b8(op1, op2, vl) -#define __riscv_vmfne_vv_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfne_vv_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfne_vv_f64m8_b8_m(mask, op1, op2, vl) -#define __riscv_vmfne_vf_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfne_vf_f64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmfne_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vmfne_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfne_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmflt_vv_f16m1_b16(op1, op2, vl) __riscv_th_vmflt_vv_f16m1_b16(op1, op2, vl) #define __riscv_vmflt_vf_f16m1_b16(op1, op2, vl) __riscv_th_vmflt_vf_f16m1_b16(op1, op2, vl) #define __riscv_vmflt_vv_f16m2_b8(op1, op2, vl) __riscv_th_vmflt_vv_f16m2_b8(op1, op2, vl) @@ -4463,30 +4463,30 @@ let HeaderCode = #define __riscv_vmflt_vf_f64m4_b16(op1, op2, vl) __riscv_th_vmflt_vf_f64m4_b16(op1, op2, vl) #define __riscv_vmflt_vv_f64m8_b8(op1, op2, vl) __riscv_th_vmflt_vv_f64m8_b8(op1, op2, vl) #define __riscv_vmflt_vf_f64m8_b8(op1, op2, vl) __riscv_th_vmflt_vf_f64m8_b8(op1, op2, vl) -#define __riscv_vmflt_vv_f16m1_b16_m(mask, op1, op2, vl) 
__riscv_th_vmflt_vv_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmflt_vv_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmflt_vv_f64m8_b8_m(mask, op1, op2, vl) -#define __riscv_vmflt_vf_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmflt_vf_f64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmflt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmflt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmflt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmfle_vv_f16m1_b16(op1, op2, vl) __riscv_th_vmfle_vv_f16m1_b16(op1, op2, vl) #define __riscv_vmfle_vf_f16m1_b16(op1, op2, vl) __riscv_th_vmfle_vf_f16m1_b16(op1, op2, vl) #define __riscv_vmfle_vv_f16m2_b8(op1, op2, vl) __riscv_th_vmfle_vv_f16m2_b8(op1, op2, vl) @@ -4511,30 +4511,30 @@ let HeaderCode = #define __riscv_vmfle_vf_f64m4_b16(op1, op2, vl) __riscv_th_vmfle_vf_f64m4_b16(op1, op2, vl) #define __riscv_vmfle_vv_f64m8_b8(op1, op2, vl) __riscv_th_vmfle_vv_f64m8_b8(op1, op2, vl) #define __riscv_vmfle_vf_f64m8_b8(op1, op2, vl) __riscv_th_vmfle_vf_f64m8_b8(op1, op2, vl) -#define __riscv_vmfle_vv_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f16m8_b2_m(mask, op1, op2, vl) -#define 
__riscv_vmfle_vf_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfle_vv_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfle_vv_f64m8_b8_m(mask, op1, op2, vl) -#define __riscv_vmfle_vf_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfle_vf_f64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmfle_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f32m4_b8_mu(mask, maskedoff, op1, 
op2, vl) +#define __riscv_vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfle_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmfgt_vv_f16m1_b16(op1, op2, vl) __riscv_th_vmfgt_vv_f16m1_b16(op1, op2, vl) #define __riscv_vmfgt_vf_f16m1_b16(op1, op2, vl) __riscv_th_vmfgt_vf_f16m1_b16(op1, op2, vl) #define __riscv_vmfgt_vv_f16m2_b8(op1, op2, vl) __riscv_th_vmfgt_vv_f16m2_b8(op1, op2, vl) @@ -4559,30 +4559,30 @@ let HeaderCode = #define __riscv_vmfgt_vf_f64m4_b16(op1, op2, vl) __riscv_th_vmfgt_vf_f64m4_b16(op1, op2, vl) #define __riscv_vmfgt_vv_f64m8_b8(op1, op2, vl) __riscv_th_vmfgt_vv_f64m8_b8(op1, op2, vl) #define __riscv_vmfgt_vf_f64m8_b8(op1, op2, vl) __riscv_th_vmfgt_vf_f64m8_b8(op1, op2, vl) -#define __riscv_vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl) 
__riscv_th_vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl) -#define __riscv_vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmfgt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define 
__riscv_vmfgt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfgt_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vmfge_vv_f16m1_b16(op1, op2, vl) __riscv_th_vmfge_vv_f16m1_b16(op1, op2, vl) #define __riscv_vmfge_vf_f16m1_b16(op1, op2, vl) __riscv_th_vmfge_vf_f16m1_b16(op1, op2, vl) #define __riscv_vmfge_vv_f16m2_b8(op1, op2, vl) __riscv_th_vmfge_vv_f16m2_b8(op1, op2, vl) @@ -4607,30 +4607,30 @@ let HeaderCode = #define __riscv_vmfge_vf_f64m4_b16(op1, op2, vl) __riscv_th_vmfge_vf_f64m4_b16(op1, op2, vl) #define __riscv_vmfge_vv_f64m8_b8(op1, op2, vl) __riscv_th_vmfge_vv_f64m8_b8(op1, op2, vl) #define __riscv_vmfge_vf_f64m8_b8(op1, op2, vl) __riscv_th_vmfge_vf_f64m8_b8(op1, op2, vl) -#define __riscv_vmfge_vv_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f16m1_b16_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f16m1_b16_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f16m2_b8_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f16m2_b8_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f16m4_b4_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f16m4_b4_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f16m8_b2_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f16m8_b2_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f32m1_b32_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f32m1_b32_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f32m2_b16_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f32m2_b16_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f32m4_b8_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f32m4_b8_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f32m8_b4_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f32m8_b4_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f64m1_b64_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f64m1_b64_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f64m2_b32_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f64m2_b32_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f64m2_b32_m(mask, op1, op2, 
vl) -#define __riscv_vmfge_vv_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f64m4_b16_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f64m4_b16_m(mask, op1, op2, vl) -#define __riscv_vmfge_vv_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfge_vv_f64m8_b8_m(mask, op1, op2, vl) -#define __riscv_vmfge_vf_f64m8_b8_m(mask, op1, op2, vl) __riscv_th_vmfge_vf_f64m8_b8_m(mask, op1, op2, vl) +#define __riscv_vmfge_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f16m1_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f16m2_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f16m4_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f16m8_b2_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f32m1_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f32m2_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f32m4_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f32m8_b4_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f64m1_b64_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f64m2_b32_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vf_f64m4_b16_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl) __riscv_th_vmfge_vv_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) +#define __riscv_vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl) 
__riscv_th_vmfge_vf_f64m8_b8_mu(mask, maskedoff, op1, op2, vl) #define __riscv_vfcvt_x_f_v_i16m1(src, vl) __riscv_th_vfcvt_x_f_v_i16m1(src, vl) #define __riscv_vfcvt_x_f_v_i16m2(src, vl) __riscv_th_vfcvt_x_f_v_i16m2(src, vl) @@ -4680,54 +4680,54 @@ let HeaderCode = #define __riscv_vfcvt_f_xu_v_f64m2(src, vl) __riscv_th_vfcvt_f_xu_v_f64m2(src, vl) #define __riscv_vfcvt_f_xu_v_f64m4(src, vl) __riscv_th_vfcvt_f_xu_v_f64m4(src, vl) #define __riscv_vfcvt_f_xu_v_f64m8(src, vl) __riscv_th_vfcvt_f_xu_v_f64m8(src, vl) -#define __riscv_vfcvt_x_f_v_i16m1_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i16m1_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i16m2_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i16m2_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i16m4_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i16m4_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i16m8_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i16m8_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u16m1_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u16m1_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u16m2_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u16m2_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u16m4_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u16m4_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u16m8_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u16m8_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f16m1_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f16m1_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f16m2_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f16m2_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f16m4_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f16m4_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f16m8_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f16m8_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f16m1_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f16m1_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f16m2_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f16m2_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f16m4_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f16m4_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f16m8_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f16m8_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i32m1_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i32m1_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i32m2_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i32m2_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i32m4_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i32m4_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i32m8_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i32m8_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u32m1_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u32m1_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u32m2_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u32m2_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u32m4_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u32m4_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u32m8_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u32m8_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f32m1_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f32m1_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f32m2_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f32m2_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f32m4_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f32m4_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f32m8_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f32m8_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f32m1_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f32m1_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f32m2_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f32m2_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f32m4_m(mask, src, vl) 
__riscv_th_vfcvt_f_xu_v_f32m4_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f32m8_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f32m8_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i64m1_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i64m1_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i64m2_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i64m2_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i64m4_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i64m4_m(mask, src, vl) -#define __riscv_vfcvt_x_f_v_i64m8_m(mask, src, vl) __riscv_th_vfcvt_x_f_v_i64m8_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u64m1_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u64m1_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u64m2_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u64m2_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u64m4_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u64m4_m(mask, src, vl) -#define __riscv_vfcvt_xu_f_v_u64m8_m(mask, src, vl) __riscv_th_vfcvt_xu_f_v_u64m8_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f64m1_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f64m1_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f64m2_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f64m2_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f64m4_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f64m4_m(mask, src, vl) -#define __riscv_vfcvt_f_x_v_f64m8_m(mask, src, vl) __riscv_th_vfcvt_f_x_v_f64m8_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f64m1_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f64m1_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f64m2_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f64m2_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f64m4_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f64m4_m(mask, src, vl) -#define __riscv_vfcvt_f_xu_v_f64m8_m(mask, src, vl) __riscv_th_vfcvt_f_xu_v_f64m8_m(mask, src, vl) +#define __riscv_vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i16m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u16m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f16m4_mu(mask, maskedoff, 
src, vl) +#define __riscv_vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i64m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u64m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f64m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl) +#define 
__riscv_vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f64m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl) __riscv_th_vfcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl) #define __riscv_vfncvt_x_f_w_i8m1(src, vl) __riscv_th_vfncvt_x_f_w_i8m1(src, vl) #define __riscv_vfncvt_x_f_w_i8m2(src, vl) __riscv_th_vfncvt_x_f_w_i8m2(src, vl) #define __riscv_vfncvt_x_f_w_i8m4(src, vl) __riscv_th_vfncvt_x_f_w_i8m4(src, vl) @@ -4764,42 +4764,42 @@ let HeaderCode = #define __riscv_vfncvt_f_f_w_f32m1(src, vl) __riscv_th_vfncvt_f_f_w_f32m1(src, vl) #define __riscv_vfncvt_f_f_w_f32m2(src, vl) __riscv_th_vfncvt_f_f_w_f32m2(src, vl) #define __riscv_vfncvt_f_f_w_f32m4(src, vl) __riscv_th_vfncvt_f_f_w_f32m4(src, vl) -#define __riscv_vfncvt_x_f_w_i8m1_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i8m1_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i8m2_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i8m2_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i8m4_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i8m4_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u8m1_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u8m1_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u8m2_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u8m2_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u8m4_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u8m4_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i16m1_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i16m1_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i16m2_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i16m2_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i16m4_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i16m4_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u16m1_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u16m1_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u16m2_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u16m2_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u16m4_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u16m4_m(mask, src, vl) -#define __riscv_vfncvt_f_x_w_f16m1_m(mask, src, vl) __riscv_th_vfncvt_f_x_w_f16m1_m(mask, src, vl) -#define __riscv_vfncvt_f_x_w_f16m2_m(mask, src, vl) __riscv_th_vfncvt_f_x_w_f16m2_m(mask, src, vl) -#define __riscv_vfncvt_f_x_w_f16m4_m(mask, src, vl) __riscv_th_vfncvt_f_x_w_f16m4_m(mask, src, vl) -#define __riscv_vfncvt_f_xu_w_f16m1_m(mask, src, vl) __riscv_th_vfncvt_f_xu_w_f16m1_m(mask, src, vl) -#define __riscv_vfncvt_f_xu_w_f16m2_m(mask, src, vl) __riscv_th_vfncvt_f_xu_w_f16m2_m(mask, src, vl) -#define __riscv_vfncvt_f_xu_w_f16m4_m(mask, src, vl) __riscv_th_vfncvt_f_xu_w_f16m4_m(mask, src, vl) -#define __riscv_vfncvt_f_f_w_f16m1_m(mask, src, vl) __riscv_th_vfncvt_f_f_w_f16m1_m(mask, src, vl) -#define __riscv_vfncvt_f_f_w_f16m2_m(mask, src, vl) __riscv_th_vfncvt_f_f_w_f16m2_m(mask, src, vl) -#define __riscv_vfncvt_f_f_w_f16m4_m(mask, src, vl) __riscv_th_vfncvt_f_f_w_f16m4_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i32m1_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i32m1_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i32m2_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i32m2_m(mask, src, vl) -#define __riscv_vfncvt_x_f_w_i32m4_m(mask, src, vl) __riscv_th_vfncvt_x_f_w_i32m4_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u32m1_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u32m1_m(mask, src, vl) -#define 
__riscv_vfncvt_xu_f_w_u32m2_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u32m2_m(mask, src, vl) -#define __riscv_vfncvt_xu_f_w_u32m4_m(mask, src, vl) __riscv_th_vfncvt_xu_f_w_u32m4_m(mask, src, vl) -#define __riscv_vfncvt_f_x_w_f32m1_m(mask, src, vl) __riscv_th_vfncvt_f_x_w_f32m1_m(mask, src, vl) -#define __riscv_vfncvt_f_x_w_f32m2_m(mask, src, vl) __riscv_th_vfncvt_f_x_w_f32m2_m(mask, src, vl) -#define __riscv_vfncvt_f_x_w_f32m4_m(mask, src, vl) __riscv_th_vfncvt_f_x_w_f32m4_m(mask, src, vl) -#define __riscv_vfncvt_f_xu_w_f32m1_m(mask, src, vl) __riscv_th_vfncvt_f_xu_w_f32m1_m(mask, src, vl) -#define __riscv_vfncvt_f_xu_w_f32m2_m(mask, src, vl) __riscv_th_vfncvt_f_xu_w_f32m2_m(mask, src, vl) -#define __riscv_vfncvt_f_xu_w_f32m4_m(mask, src, vl) __riscv_th_vfncvt_f_xu_w_f32m4_m(mask, src, vl) -#define __riscv_vfncvt_f_f_w_f32m1_m(mask, src, vl) __riscv_th_vfncvt_f_f_w_f32m1_m(mask, src, vl) -#define __riscv_vfncvt_f_f_w_f32m2_m(mask, src, vl) __riscv_th_vfncvt_f_f_w_f32m2_m(mask, src, vl) -#define __riscv_vfncvt_f_f_w_f32m4_m(mask, src, vl) __riscv_th_vfncvt_f_f_w_f32m4_m(mask, src, vl) +#define __riscv_vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i8m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i8m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i8m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u8m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u8m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u8m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_x_w_f16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_x_w_f16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_x_w_f16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_xu_w_f16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_xu_w_f16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_xu_w_f16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_f_w_f16m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_f_w_f16m2_mu(mask, 
maskedoff, src, vl) +#define __riscv_vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_f_w_f16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_x_f_w_i32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_xu_f_w_u32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_x_w_f32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_x_w_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_x_w_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_xu_w_f32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_xu_w_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_xu_w_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_f_w_f32m1_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_f_w_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfncvt_f_f_w_f32m4_mu(mask, maskedoff, src, vl) #define __riscv_vfwcvt_f_x_v_f16m2(src, vl) __riscv_th_vfwcvt_f_x_v_f16m2(src, vl) #define __riscv_vfwcvt_f_x_v_f16m4(src, vl) __riscv_th_vfwcvt_f_x_v_f16m4(src, vl) #define __riscv_vfwcvt_f_x_v_f16m8(src, vl) __riscv_th_vfwcvt_f_x_v_f16m8(src, vl) @@ -4824,30 +4824,30 @@ let HeaderCode = #define __riscv_vfwcvt_f_f_v_f64m2(src, vl) __riscv_th_vfwcvt_f_f_v_f64m2(src, vl) #define __riscv_vfwcvt_f_f_v_f64m4(src, vl) __riscv_th_vfwcvt_f_f_v_f64m4(src, vl) #define __riscv_vfwcvt_f_f_v_f64m8(src, vl) __riscv_th_vfwcvt_f_f_v_f64m8(src, vl) -#define __riscv_vfwcvt_f_x_v_f16m2_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f16m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f16m4_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f16m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f16m8_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f16m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f16m2_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f16m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f16m4_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f16m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f16m8_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f16m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f32m2_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f32m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f32m4_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f32m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f32m8_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f32m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f32m2_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f32m2_m(mask, src, vl) 
-#define __riscv_vfwcvt_f_xu_v_f32m4_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f32m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f32m8_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f32m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_f_v_f32m2_m(mask, src, vl) __riscv_th_vfwcvt_f_f_v_f32m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_f_v_f32m4_m(mask, src, vl) __riscv_th_vfwcvt_f_f_v_f32m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_f_v_f32m8_m(mask, src, vl) __riscv_th_vfwcvt_f_f_v_f32m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f64m2_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f64m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f64m4_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f64m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_x_v_f64m8_m(mask, src, vl) __riscv_th_vfwcvt_f_x_v_f64m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f64m2_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f64m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f64m4_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f64m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_xu_v_f64m8_m(mask, src, vl) __riscv_th_vfwcvt_f_xu_v_f64m8_m(mask, src, vl) -#define __riscv_vfwcvt_f_f_v_f64m2_m(mask, src, vl) __riscv_th_vfwcvt_f_f_v_f64m2_m(mask, src, vl) -#define __riscv_vfwcvt_f_f_v_f64m4_m(mask, src, vl) __riscv_th_vfwcvt_f_f_v_f64m4_m(mask, src, vl) -#define __riscv_vfwcvt_f_f_v_f64m8_m(mask, src, vl) __riscv_th_vfwcvt_f_f_v_f64m8_m(mask, src, vl) +#define __riscv_vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f16m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f16m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f16m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f16m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_f_v_f32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_f_v_f32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_f_v_f32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f64m4_mu(mask, maskedoff, src, vl) +#define 
__riscv_vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_x_v_f64m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_xu_v_f64m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_f_v_f64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_f_v_f64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_f_f_v_f64m8_mu(mask, maskedoff, src, vl) #define __riscv_vfwcvt_x_f_v_i32m2(src, vl) __riscv_th_vfwcvt_x_f_v_i32m2(src, vl) #define __riscv_vfwcvt_x_f_v_i32m4(src, vl) __riscv_th_vfwcvt_x_f_v_i32m4(src, vl) #define __riscv_vfwcvt_x_f_v_i32m8(src, vl) __riscv_th_vfwcvt_x_f_v_i32m8(src, vl) @@ -4860,18 +4860,18 @@ let HeaderCode = #define __riscv_vfwcvt_xu_f_v_u64m2(src, vl) __riscv_th_vfwcvt_xu_f_v_u64m2(src, vl) #define __riscv_vfwcvt_xu_f_v_u64m4(src, vl) __riscv_th_vfwcvt_xu_f_v_u64m4(src, vl) #define __riscv_vfwcvt_xu_f_v_u64m8(src, vl) __riscv_th_vfwcvt_xu_f_v_u64m8(src, vl) -#define __riscv_vfwcvt_x_f_v_i32m2_m(mask, src, vl) __riscv_th_vfwcvt_x_f_v_i32m2_m(mask, src, vl) -#define __riscv_vfwcvt_x_f_v_i32m4_m(mask, src, vl) __riscv_th_vfwcvt_x_f_v_i32m4_m(mask, src, vl) -#define __riscv_vfwcvt_x_f_v_i32m8_m(mask, src, vl) __riscv_th_vfwcvt_x_f_v_i32m8_m(mask, src, vl) -#define __riscv_vfwcvt_xu_f_v_u32m2_m(mask, src, vl) __riscv_th_vfwcvt_xu_f_v_u32m2_m(mask, src, vl) -#define __riscv_vfwcvt_xu_f_v_u32m4_m(mask, src, vl) __riscv_th_vfwcvt_xu_f_v_u32m4_m(mask, src, vl) -#define __riscv_vfwcvt_xu_f_v_u32m8_m(mask, src, vl) __riscv_th_vfwcvt_xu_f_v_u32m8_m(mask, src, vl) -#define __riscv_vfwcvt_x_f_v_i64m2_m(mask, src, vl) __riscv_th_vfwcvt_x_f_v_i64m2_m(mask, src, vl) -#define __riscv_vfwcvt_x_f_v_i64m4_m(mask, src, vl) __riscv_th_vfwcvt_x_f_v_i64m4_m(mask, src, vl) -#define __riscv_vfwcvt_x_f_v_i64m8_m(mask, src, vl) __riscv_th_vfwcvt_x_f_v_i64m8_m(mask, src, vl) -#define __riscv_vfwcvt_xu_f_v_u64m2_m(mask, src, vl) __riscv_th_vfwcvt_xu_f_v_u64m2_m(mask, src, vl) -#define __riscv_vfwcvt_xu_f_v_u64m4_m(mask, src, vl) __riscv_th_vfwcvt_xu_f_v_u64m4_m(mask, src, vl) -#define __riscv_vfwcvt_xu_f_v_u64m8_m(mask, src, vl) __riscv_th_vfwcvt_xu_f_v_u64m8_m(mask, src, vl) +#define __riscv_vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_x_f_v_i32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_x_f_v_i32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_x_f_v_i32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_xu_f_v_u32m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_xu_f_v_u32m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_xu_f_v_u32m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_x_f_v_i64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl) 
__riscv_th_vfwcvt_x_f_v_i64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_x_f_v_i64m8_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_xu_f_v_u64m2_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_xu_f_v_u64m4_mu(mask, maskedoff, src, vl) +#define __riscv_vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl) __riscv_th_vfwcvt_xu_f_v_u64m8_mu(mask, maskedoff, src, vl) }] in def th_vector_floating_point_operations_wrapper_macros: RVVHeader; @@ -4892,18 +4892,18 @@ let HeaderCode = #define __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1(vector, scalar, vl) #define __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1(vector, scalar, vl) #define __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vfredmax_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1(vector, scalar, vl) #define __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1(vector, scalar, vl) #define __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1(vector, scalar, vl) @@ -4916,18 +4916,18 @@ let HeaderCode = #define __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1(vector, scalar, vl) #define __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1(vector, scalar, vl) #define __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) +#define 
__riscv_vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1(vector, scalar, vl) #define __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1(vector, scalar, vl) #define __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1(vector, scalar, vl) @@ -4940,18 +4940,18 @@ let HeaderCode = #define __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1(vector, scalar, vl) #define __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1(vector, scalar, vl) #define __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) -#define 
__riscv_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) #define __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) #define __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) @@ -4964,18 +4964,18 @@ let HeaderCode = #define __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) #define __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) #define __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) -#define 
__riscv_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vfredusum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) #define __riscv_vfredusum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) #define __riscv_vfredusum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) @@ -4988,18 +4988,18 @@ let HeaderCode = #define __riscv_vfredusum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) #define __riscv_vfredusum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) #define __riscv_vfredusum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m8_f16m1_m(mask, 
vector, scalar, vl) __riscv_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl) #define __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl) #define __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl) @@ -5008,14 +5008,14 @@ let HeaderCode = #define __riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl) #define __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl) #define __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl) 
__riscv_th_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl) #define __riscv_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl) #define __riscv_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl) @@ -5024,14 +5024,14 @@ let HeaderCode = #define __riscv_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl) #define __riscv_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl) #define __riscv_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) 
__riscv_th_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1(vector, scalar, vl) #define __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1(vector, scalar, vl) #define __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1(vector, scalar, vl) @@ -5064,38 +5064,38 @@ let HeaderCode = #define __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1(vector, scalar, vl) #define __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1(vector, scalar, vl) #define __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i32m1_i32m1_m(mask, 
vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredand_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredand_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m1_u64m1_m(mask, dest, vector, scalar, 
vl) __riscv_th_vredand_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1(vector, scalar, vl) #define __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1(vector, scalar, vl) #define __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1(vector, scalar, vl) @@ -5112,22 +5112,22 @@ let HeaderCode = #define __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1(vector, scalar, vl) #define __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1(vector, scalar, vl) #define __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) 
+#define __riscv_vredmax_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl) #define __riscv_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl) #define __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl) @@ -5144,22 +5144,22 @@ let HeaderCode = #define __riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl) #define __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl) #define __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, 
vl) __riscv_th_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, 
vl) #define __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1(vector, scalar, vl) #define __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1(vector, scalar, vl) #define __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1(vector, scalar, vl) @@ -5176,22 +5176,22 @@ let HeaderCode = #define __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1(vector, scalar, vl) #define __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1(vector, scalar, vl) #define __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredmin_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1(vector, scalar, vl) #define __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1(vector, scalar, vl) #define __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1(vector, scalar, vl) @@ -5208,22 +5208,22 @@ let HeaderCode = #define __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1(vector, scalar, vl) #define __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1(vector, scalar, vl) #define __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl) -#define 
__riscv_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vredminu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredminu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl)
#define __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1(vector, scalar, vl)
#define __riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1(vector, scalar, vl)
#define __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1(vector, scalar, vl)
@@ -5256,38 +5256,38 @@ let HeaderCode =
#define __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1(vector, scalar, vl)
#define __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1(vector, scalar, vl)
#define __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1(vector, scalar, vl)
-#define __riscv_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vredor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl)
#define __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1(vector, scalar, vl)
#define __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1(vector, scalar, vl)
#define __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1(vector, scalar, vl)
@@ -5320,38 +5320,38 @@ let HeaderCode =
#define __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1(vector, scalar, vl)
#define __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1(vector, scalar, vl)
#define __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1(vector, scalar, vl)
-#define __riscv_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl)
-#define __riscv_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl)
+#define __riscv_vredsum_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl)
+#define __riscv_vredsum_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1(vector, scalar, vl) #define __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1(vector, scalar, vl) #define __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1(vector, scalar, vl) @@ -5384,38 +5384,38 @@ let HeaderCode = #define __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1(vector, scalar, vl) #define __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1(vector, scalar, vl) #define __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) 
__riscv_th_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredxor_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1(vector, scalar, vl) #define __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1(vector, scalar, vl) #define __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1(vector, scalar, vl) @@ -5428,18 +5428,18 @@ let HeaderCode = #define __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1(vector, scalar, vl) #define __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1(vector, scalar, vl) #define __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m1_i16m1_m(mask, vector, 
scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1_mu(mask, dest, vector, scalar, vl) #define __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl) #define __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl) #define __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl) 
__riscv_th_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl) @@ -5458,24 +5458,24 @@ let HeaderCode = #define __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl) #define __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl) #define __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf2_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vwredsumu_vs_u8m2_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf4_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf2_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32mf2_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1_mu(mask, dest, vector, scalar, vl) }] in def th_vector_reduction_operations_wrapper_macros: RVVHeader; @@ -5566,28 +5566,28 @@ let HeaderCode = #define __riscv_vmsbf_m_b2(op1, vl) __riscv_th_vmsbf_m_b2(op1, vl) #define __riscv_vmsbf_m_b4(op1, vl) __riscv_th_vmsbf_m_b4(op1, vl) #define __riscv_vmsbf_m_b8(op1, vl) __riscv_th_vmsbf_m_b8(op1, vl) -#define __riscv_vmsbf_m_b1_m(mask, op1, vl) __riscv_th_vmsbf_m_b1_m(mask, op1, vl) -#define __riscv_vmsbf_m_b2_m(mask, op1, vl) __riscv_th_vmsbf_m_b2_m(mask, op1, vl) -#define __riscv_vmsbf_m_b4_m(mask, op1, vl) __riscv_th_vmsbf_m_b4_m(mask, op1, vl) -#define __riscv_vmsbf_m_b8_m(mask, op1, vl) __riscv_th_vmsbf_m_b8_m(mask, op1, vl) +#define __riscv_vmsbf_m_b1_m(mask, maskedoff, op1, vl) __riscv_th_vmsbf_m_b1_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsbf_m_b2_m(mask, maskedoff, op1, vl) __riscv_th_vmsbf_m_b2_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsbf_m_b4_m(mask, maskedoff, op1, vl) __riscv_th_vmsbf_m_b4_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsbf_m_b8_m(mask, maskedoff, op1, vl) __riscv_th_vmsbf_m_b8_mu(mask, maskedoff, op1, vl) #define __riscv_vmsof_m_b1(op1, vl) __riscv_th_vmsof_m_b1(op1, vl) #define __riscv_vmsof_m_b2(op1, vl) __riscv_th_vmsof_m_b2(op1, vl) #define __riscv_vmsof_m_b4(op1, vl) __riscv_th_vmsof_m_b4(op1, vl) #define __riscv_vmsof_m_b8(op1, vl) __riscv_th_vmsof_m_b8(op1, vl) -#define __riscv_vmsof_m_b1_m(mask, op1, vl) __riscv_th_vmsof_m_b1_m(mask, op1, vl) -#define __riscv_vmsof_m_b2_m(mask, op1, vl) __riscv_th_vmsof_m_b2_m(mask, op1, vl) -#define __riscv_vmsof_m_b4_m(mask, op1, vl) 
__riscv_th_vmsof_m_b4_m(mask, op1, vl) -#define __riscv_vmsof_m_b8_m(mask, op1, vl) __riscv_th_vmsof_m_b8_m(mask, op1, vl) +#define __riscv_vmsof_m_b1_m(mask, maskedoff, op1, vl) __riscv_th_vmsof_m_b1_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsof_m_b2_m(mask, maskedoff, op1, vl) __riscv_th_vmsof_m_b2_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsof_m_b4_m(mask, maskedoff, op1, vl) __riscv_th_vmsof_m_b4_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsof_m_b8_m(mask, maskedoff, op1, vl) __riscv_th_vmsof_m_b8_mu(mask, maskedoff, op1, vl) #define __riscv_vmsif_m_b1(op1, vl) __riscv_th_vmsif_m_b1(op1, vl) #define __riscv_vmsif_m_b2(op1, vl) __riscv_th_vmsif_m_b2(op1, vl) #define __riscv_vmsif_m_b4(op1, vl) __riscv_th_vmsif_m_b4(op1, vl) #define __riscv_vmsif_m_b8(op1, vl) __riscv_th_vmsif_m_b8(op1, vl) -#define __riscv_vmsif_m_b1_m(mask, op1, vl) __riscv_th_vmsif_m_b1_m(mask, op1, vl) -#define __riscv_vmsif_m_b2_m(mask, op1, vl) __riscv_th_vmsif_m_b2_m(mask, op1, vl) -#define __riscv_vmsif_m_b4_m(mask, op1, vl) __riscv_th_vmsif_m_b4_m(mask, op1, vl) -#define __riscv_vmsif_m_b8_m(mask, op1, vl) __riscv_th_vmsif_m_b8_m(mask, op1, vl) +#define __riscv_vmsif_m_b1_m(mask, maskedoff, op1, vl) __riscv_th_vmsif_m_b1_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsif_m_b2_m(mask, maskedoff, op1, vl) __riscv_th_vmsif_m_b2_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsif_m_b4_m(mask, maskedoff, op1, vl) __riscv_th_vmsif_m_b4_mu(mask, maskedoff, op1, vl) +#define __riscv_vmsif_m_b8_m(mask, maskedoff, op1, vl) __riscv_th_vmsif_m_b8_mu(mask, maskedoff, op1, vl) #define __riscv_vid_v_u8m1(vl) __riscv_th_vid_v_u8m1(vl) #define __riscv_vid_v_u8m2(vl) __riscv_th_vid_v_u8m2(vl) @@ -5606,22 +5606,22 @@ let HeaderCode = #define __riscv_vid_v_u64m4(vl) __riscv_th_vid_v_u64m4(vl) #define __riscv_vid_v_u64m8(vl) __riscv_th_vid_v_u64m8(vl) -#define __riscv_vid_v_u8m1_m(mask, vl) __riscv_th_vid_v_u8m1_m(mask, vl) -#define __riscv_vid_v_u8m2_m(mask, vl) __riscv_th_vid_v_u8m2_m(mask, vl) -#define __riscv_vid_v_u8m4_m(mask, vl) __riscv_th_vid_v_u8m4_m(mask, vl) -#define __riscv_vid_v_u8m8_m(mask, vl) __riscv_th_vid_v_u8m8_m(mask, vl) -#define __riscv_vid_v_u16m1_m(mask, vl) __riscv_th_vid_v_u16m1_m(mask, vl) -#define __riscv_vid_v_u16m2_m(mask, vl) __riscv_th_vid_v_u16m2_m(mask, vl) -#define __riscv_vid_v_u16m4_m(mask, vl) __riscv_th_vid_v_u16m4_m(mask, vl) -#define __riscv_vid_v_u16m8_m(mask, vl) __riscv_th_vid_v_u16m8_m(mask, vl) -#define __riscv_vid_v_u32m1_m(mask, vl) __riscv_th_vid_v_u32m1_m(mask, vl) -#define __riscv_vid_v_u32m2_m(mask, vl) __riscv_th_vid_v_u32m2_m(mask, vl) -#define __riscv_vid_v_u32m4_m(mask, vl) __riscv_th_vid_v_u32m4_m(mask, vl) -#define __riscv_vid_v_u32m8_m(mask, vl) __riscv_th_vid_v_u32m8_m(mask, vl) -#define __riscv_vid_v_u64m1_m(mask, vl) __riscv_th_vid_v_u64m1_m(mask, vl) -#define __riscv_vid_v_u64m2_m(mask, vl) __riscv_th_vid_v_u64m2_m(mask, vl) -#define __riscv_vid_v_u64m4_m(mask, vl) __riscv_th_vid_v_u64m4_m(mask, vl) -#define __riscv_vid_v_u64m8_m(mask, vl) __riscv_th_vid_v_u64m8_m(mask, vl) +#define __riscv_vid_v_u8m1_m(mask, maskedoff, vl) __riscv_th_vid_v_u8m1_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u8m2_m(mask, maskedoff, vl) __riscv_th_vid_v_u8m2_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u8m4_m(mask, maskedoff, vl) __riscv_th_vid_v_u8m4_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u8m8_m(mask, maskedoff, vl) __riscv_th_vid_v_u8m8_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u16m1_m(mask, maskedoff, vl) __riscv_th_vid_v_u16m1_mu(mask, maskedoff, vl) 
+#define __riscv_vid_v_u16m2_m(mask, maskedoff, vl) __riscv_th_vid_v_u16m2_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u16m4_m(mask, maskedoff, vl) __riscv_th_vid_v_u16m4_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u16m8_m(mask, maskedoff, vl) __riscv_th_vid_v_u16m8_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u32m1_m(mask, maskedoff, vl) __riscv_th_vid_v_u32m1_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u32m2_m(mask, maskedoff, vl) __riscv_th_vid_v_u32m2_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u32m4_m(mask, maskedoff, vl) __riscv_th_vid_v_u32m4_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u32m8_m(mask, maskedoff, vl) __riscv_th_vid_v_u32m8_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u64m1_m(mask, maskedoff, vl) __riscv_th_vid_v_u64m1_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u64m2_m(mask, maskedoff, vl) __riscv_th_vid_v_u64m2_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u64m4_m(mask, maskedoff, vl) __riscv_th_vid_v_u64m4_mu(mask, maskedoff, vl) +#define __riscv_vid_v_u64m8_m(mask, maskedoff, vl) __riscv_th_vid_v_u64m8_mu(mask, maskedoff, vl) #define __riscv_viota_m_u8m1(op1, vl) __riscv_th_viota_m_u8m1(op1, vl) #define __riscv_viota_m_u8m2(op1, vl) __riscv_th_viota_m_u8m2(op1, vl) @@ -5640,22 +5640,22 @@ let HeaderCode = #define __riscv_viota_m_u64m4(op1, vl) __riscv_th_viota_m_u64m4(op1, vl) #define __riscv_viota_m_u64m8(op1, vl) __riscv_th_viota_m_u64m8(op1, vl) -#define __riscv_viota_m_u8m1_m(mask, op1, vl) __riscv_th_viota_m_u8m1_m(mask, op1, vl) -#define __riscv_viota_m_u8m2_m(mask, op1, vl) __riscv_th_viota_m_u8m2_m(mask, op1, vl) -#define __riscv_viota_m_u8m4_m(mask, op1, vl) __riscv_th_viota_m_u8m4_m(mask, op1, vl) -#define __riscv_viota_m_u8m8_m(mask, op1, vl) __riscv_th_viota_m_u8m8_m(mask, op1, vl) -#define __riscv_viota_m_u16m1_m(mask, op1, vl) __riscv_th_viota_m_u16m1_m(mask, op1, vl) -#define __riscv_viota_m_u16m2_m(mask, op1, vl) __riscv_th_viota_m_u16m2_m(mask, op1, vl) -#define __riscv_viota_m_u16m4_m(mask, op1, vl) __riscv_th_viota_m_u16m4_m(mask, op1, vl) -#define __riscv_viota_m_u16m8_m(mask, op1, vl) __riscv_th_viota_m_u16m8_m(mask, op1, vl) -#define __riscv_viota_m_u32m1_m(mask, op1, vl) __riscv_th_viota_m_u32m1_m(mask, op1, vl) -#define __riscv_viota_m_u32m2_m(mask, op1, vl) __riscv_th_viota_m_u32m2_m(mask, op1, vl) -#define __riscv_viota_m_u32m4_m(mask, op1, vl) __riscv_th_viota_m_u32m4_m(mask, op1, vl) -#define __riscv_viota_m_u32m8_m(mask, op1, vl) __riscv_th_viota_m_u32m8_m(mask, op1, vl) -#define __riscv_viota_m_u64m1_m(mask, op1, vl) __riscv_th_viota_m_u64m1_m(mask, op1, vl) -#define __riscv_viota_m_u64m2_m(mask, op1, vl) __riscv_th_viota_m_u64m2_m(mask, op1, vl) -#define __riscv_viota_m_u64m4_m(mask, op1, vl) __riscv_th_viota_m_u64m4_m(mask, op1, vl) -#define __riscv_viota_m_u64m8_m(mask, op1, vl) __riscv_th_viota_m_u64m8_m(mask, op1, vl) +#define __riscv_viota_m_u8m1_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u8m1_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u8m2_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u8m2_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u8m4_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u8m4_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u8m8_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u8m8_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u16m1_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u16m1_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u16m2_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u16m2_mu(mask, maskedoff, op1, vl) +#define 
__riscv_viota_m_u16m4_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u16m4_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u16m8_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u16m8_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u32m1_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u32m1_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u32m2_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u32m2_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u32m4_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u32m4_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u32m8_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u32m8_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u64m1_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u64m1_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u64m2_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u64m2_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u64m4_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u64m4_mu(mask, maskedoff, op1, vl) +#define __riscv_viota_m_u64m8_m(mask, maskedoff, op1, vl) __riscv_th_viota_m_u64m8_mu(mask, maskedoff, op1, vl) }] in def th_vector_mask_wrapper_macros: RVVHeader; @@ -5796,94 +5796,94 @@ let HeaderCode = #define __riscv_vrgather_vx_u64m4(op1, index, vl) __riscv_th_vrgather_vx_u64m4(op1, index, vl) #define __riscv_vrgather_vv_u64m8(op1, index, vl) __riscv_th_vrgather_vv_u64m8(op1, index, vl) #define __riscv_vrgather_vx_u64m8(op1, index, vl) __riscv_th_vrgather_vx_u64m8(op1, index, vl) -#define __riscv_vrgather_vv_f16m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f16m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f16m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f16m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f16m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f16m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f16m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f16m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f16m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f16m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f16m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f16m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f16m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f16m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f16m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f16m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f32m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f32m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f32m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f32m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f32m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f32m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f32m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f32m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f32m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f32m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f32m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f32m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f32m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f32m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f32m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f32m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f64m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f64m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f64m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f64m1_m(mask, op1, index, vl) -#define 
__riscv_vrgather_vv_f64m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f64m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f64m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f64m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f64m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f64m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f64m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f64m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_f64m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_f64m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_f64m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_f64m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i8m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i8m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i8m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i8m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i8m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i8m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i8m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i8m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i8m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i8m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i8m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i8m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i8m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i8m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i8m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i8m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i16m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i16m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i16m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i16m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i16m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i16m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i16m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i16m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i16m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i16m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i16m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i16m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i16m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i16m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i16m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i16m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i32m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i32m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i32m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i32m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i32m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i32m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i32m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i32m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i32m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i32m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i32m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i32m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i32m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i32m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i32m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i32m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i64m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i64m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i64m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i64m1_m(mask, op1, index, 
vl) -#define __riscv_vrgather_vv_i64m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i64m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i64m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i64m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i64m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i64m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i64m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i64m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_i64m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_i64m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_i64m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_i64m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u8m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u8m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u8m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u8m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u8m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u8m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u8m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u8m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u8m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u8m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u8m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u8m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u8m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u8m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u8m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u8m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u16m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u16m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u16m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u16m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u16m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u16m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u16m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u16m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u16m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u16m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u16m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u16m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u16m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u16m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u16m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u16m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u32m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u32m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u32m1_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u32m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u32m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u32m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u32m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u32m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u32m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u32m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u32m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u32m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u32m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u32m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u32m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u32m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u64m1_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u64m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u64m1_m(mask, op1, index, vl) 
__riscv_th_vrgather_vx_u64m1_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u64m2_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u64m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u64m2_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u64m2_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u64m4_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u64m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u64m4_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u64m4_m(mask, op1, index, vl) -#define __riscv_vrgather_vv_u64m8_m(mask, op1, index, vl) __riscv_th_vrgather_vv_u64m8_m(mask, op1, index, vl) -#define __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl) __riscv_th_vrgather_vx_u64m8_m(mask, op1, index, vl) +#define __riscv_vrgather_vv_f16m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f16m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f16m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f16m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f16m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f16m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f16m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f16m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f16m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f16m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f16m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f16m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f16m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f16m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f16m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f16m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f32m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f32m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f32m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f32m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f32m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f32m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f32m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f32m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f32m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f32m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f32m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f32m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f32m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f32m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f32m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f32m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f64m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f64m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f64m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f64m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f64m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f64m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f64m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f64m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f64m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f64m4_mu(mask, maskedoff, op1, 
index, vl) +#define __riscv_vrgather_vx_f64m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f64m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_f64m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_f64m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_f64m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_f64m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i8m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i8m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i8m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i8m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i8m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i8m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i8m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i8m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i8m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i8m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i8m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i8m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i8m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i8m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i8m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i8m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i16m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i16m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i16m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i16m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i16m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i16m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i16m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i16m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i16m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i16m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i16m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i16m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i16m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i16m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i16m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i16m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i32m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i32m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i32m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i32m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i32m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i32m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i32m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i32m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i32m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i32m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i32m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i32m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i32m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i32m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i32m8_m(mask, 
maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i32m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i64m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i64m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i64m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i64m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i64m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i64m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i64m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i64m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i64m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i64m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i64m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i64m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_i64m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_i64m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_i64m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_i64m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u8m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u8m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u8m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u8m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u8m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u8m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u8m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u8m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u8m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u8m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u8m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u8m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u8m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u8m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u8m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u8m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u16m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u16m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u16m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u16m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u16m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u16m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u16m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u16m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u16m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u16m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u16m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u16m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u16m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u16m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u16m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u16m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u32m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u32m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u32m1_m(mask, maskedoff, op1, index, vl) 
__riscv_th_vrgather_vx_u32m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u32m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u32m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u32m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u32m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u32m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u32m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u32m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u32m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u32m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u32m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u32m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u32m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u64m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u64m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u64m1_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u64m1_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u64m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u64m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u64m2_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u64m2_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u64m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u64m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u64m4_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u64m4_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vv_u64m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vv_u64m8_mu(mask, maskedoff, op1, index, vl) +#define __riscv_vrgather_vx_u64m8_m(mask, maskedoff, op1, index, vl) __riscv_th_vrgather_vx_u64m8_mu(mask, maskedoff, op1, index, vl) #define __riscv_vslide1down_vx_i8m1(src, value, vl) __riscv_th_vslide1down_vx_i8m1(src, value, vl) #define __riscv_vslide1down_vx_i8m2(src, value, vl) __riscv_th_vslide1down_vx_i8m2(src, value, vl) #define __riscv_vslide1down_vx_i8m4(src, value, vl) __riscv_th_vslide1down_vx_i8m4(src, value, vl) @@ -5916,38 +5916,38 @@ let HeaderCode = #define __riscv_vslide1down_vx_u64m2(src, value, vl) __riscv_th_vslide1down_vx_u64m2(src, value, vl) #define __riscv_vslide1down_vx_u64m4(src, value, vl) __riscv_th_vslide1down_vx_u64m4(src, value, vl) #define __riscv_vslide1down_vx_u64m8(src, value, vl) __riscv_th_vslide1down_vx_u64m8(src, value, vl) -#define __riscv_vslide1down_vx_i8m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i8m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i8m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i8m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i8m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i8m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i8m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i8m8_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i16m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i16m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i16m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i16m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i16m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i16m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i16m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i16m8_m(mask, src, value, vl) -#define 
__riscv_vslide1down_vx_i32m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i32m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i32m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i32m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i32m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i32m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i32m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i32m8_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i64m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i64m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i64m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i64m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i64m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i64m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_i64m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_i64m8_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u8m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u8m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u8m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u8m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u8m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u8m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u8m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u8m8_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u16m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u16m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u16m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u16m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u16m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u16m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u16m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u16m8_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u32m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u32m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u32m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u32m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u32m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u32m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u32m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u32m8_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u64m1_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u64m1_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u64m2_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u64m2_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u64m4_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u64m4_m(mask, src, value, vl) -#define __riscv_vslide1down_vx_u64m8_m(mask, src, value, vl) __riscv_th_vslide1down_vx_u64m8_m(mask, src, value, vl) +#define __riscv_vslide1down_vx_i8m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i8m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i8m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i8m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i8m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i8m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i8m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i8m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i16m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i16m1_mu(mask, maskedoff, src, value, vl) +#define 
__riscv_vslide1down_vx_i16m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i16m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i16m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i16m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i16m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i16m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i32m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i32m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i32m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i32m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i32m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i32m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i32m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i32m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i64m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i64m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i64m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i64m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i64m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i64m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_i64m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_i64m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u8m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u8m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u8m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u8m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u8m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u8m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u8m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u8m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u16m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u16m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u16m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u16m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u16m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u16m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u16m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u16m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u32m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u32m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u32m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u32m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u32m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u32m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u32m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u32m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u64m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u64m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u64m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u64m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u64m4_m(mask, 
maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u64m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1down_vx_u64m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1down_vx_u64m8_mu(mask, maskedoff, src, value, vl) #define __riscv_vslide1up_vx_i8m1(src, value, vl) __riscv_th_vslide1up_vx_i8m1(src, value, vl) #define __riscv_vslide1up_vx_i8m2(src, value, vl) __riscv_th_vslide1up_vx_i8m2(src, value, vl) #define __riscv_vslide1up_vx_i8m4(src, value, vl) __riscv_th_vslide1up_vx_i8m4(src, value, vl) @@ -5980,38 +5980,38 @@ let HeaderCode = #define __riscv_vslide1up_vx_u64m2(src, value, vl) __riscv_th_vslide1up_vx_u64m2(src, value, vl) #define __riscv_vslide1up_vx_u64m4(src, value, vl) __riscv_th_vslide1up_vx_u64m4(src, value, vl) #define __riscv_vslide1up_vx_u64m8(src, value, vl) __riscv_th_vslide1up_vx_u64m8(src, value, vl) -#define __riscv_vslide1up_vx_i8m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i8m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i8m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i8m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i8m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i8m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i8m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i8m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i16m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i16m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i16m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i16m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i16m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i16m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i16m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i16m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i32m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i32m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i32m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i32m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i32m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i32m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i32m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i32m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i64m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i64m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i64m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i64m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i64m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i64m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_i64m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_i64m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u8m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u8m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u8m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u8m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u8m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u8m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u8m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u8m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u16m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u16m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u16m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u16m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u16m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u16m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u16m8_m(mask, src, value, vl) 
__riscv_th_vslide1up_vx_u16m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u32m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u32m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u32m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u32m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u32m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u32m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u32m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u32m8_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u64m1_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u64m1_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u64m2_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u64m2_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u64m4_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u64m4_m(mask, src, value, vl) -#define __riscv_vslide1up_vx_u64m8_m(mask, src, value, vl) __riscv_th_vslide1up_vx_u64m8_m(mask, src, value, vl) +#define __riscv_vslide1up_vx_i8m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i8m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i8m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i8m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i8m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i8m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i8m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i8m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i16m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i16m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i16m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i16m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i16m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i16m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i16m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i16m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i32m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i32m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i32m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i32m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i32m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i32m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i32m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i32m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i64m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i64m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i64m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i64m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i64m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i64m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_i64m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_i64m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u8m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u8m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u8m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u8m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u8m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u8m4_mu(mask, maskedoff, src, value, vl) 
+#define __riscv_vslide1up_vx_u8m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u8m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u16m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u16m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u16m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u16m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u16m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u16m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u16m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u16m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u32m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u32m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u32m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u32m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u32m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u32m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u32m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u32m8_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u64m1_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u64m1_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u64m2_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u64m2_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u64m4_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u64m4_mu(mask, maskedoff, src, value, vl) +#define __riscv_vslide1up_vx_u64m8_m(mask, maskedoff, src, value, vl) __riscv_th_vslide1up_vx_u64m8_mu(mask, maskedoff, src, value, vl) #define __riscv_vslidedown_vx_f16m1(src, offset, vl) __riscv_th_vslidedown_vx_f16m1(src, offset, vl) #define __riscv_vslidedown_vx_f16m2(src, offset, vl) __riscv_th_vslidedown_vx_f16m2(src, offset, vl) #define __riscv_vslidedown_vx_f16m4(src, offset, vl) __riscv_th_vslidedown_vx_f16m4(src, offset, vl) @@ -6056,50 +6056,50 @@ let HeaderCode = #define __riscv_vslidedown_vx_u64m2(src, offset, vl) __riscv_th_vslidedown_vx_u64m2(src, offset, vl) #define __riscv_vslidedown_vx_u64m4(src, offset, vl) __riscv_th_vslidedown_vx_u64m4(src, offset, vl) #define __riscv_vslidedown_vx_u64m8(src, offset, vl) __riscv_th_vslidedown_vx_u64m8(src, offset, vl) -#define __riscv_vslidedown_vx_f16m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f16m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f16m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f16m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f16m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f16m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f16m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f16m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f32m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f32m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f32m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f32m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f32m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f32m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f32m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f32m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f64m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f64m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f64m2_m(mask, src, offset, vl) 
__riscv_th_vslidedown_vx_f64m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f64m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f64m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_f64m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_f64m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i8m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i8m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i8m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i8m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i8m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i8m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i8m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i8m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i16m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i16m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i16m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i16m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i16m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i16m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i16m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i16m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i32m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i32m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i32m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i32m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i32m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i32m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i32m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i32m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i64m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i64m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i64m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i64m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i64m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i64m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_i64m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_i64m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u8m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u8m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u8m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u8m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u8m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u8m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u8m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u8m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u16m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u16m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u16m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u16m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u16m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u16m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u16m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u16m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u32m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u32m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u32m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u32m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u32m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u32m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u32m8_m(mask, src, offset, vl) 
__riscv_th_vslidedown_vx_u32m8_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u64m1_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u64m1_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u64m2_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u64m2_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u64m4_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u64m4_m(mask, src, offset, vl) -#define __riscv_vslidedown_vx_u64m8_m(mask, src, offset, vl) __riscv_th_vslidedown_vx_u64m8_m(mask, src, offset, vl) +#define __riscv_vslidedown_vx_f16m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f16m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f16m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f16m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f16m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f16m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f16m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f16m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f32m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f32m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f32m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f32m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f32m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f32m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f32m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f32m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f64m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f64m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f64m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f64m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f64m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f64m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_f64m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_f64m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i8m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i8m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i8m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i8m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i8m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i8m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i8m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i8m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i16m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i16m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i16m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i16m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i16m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i16m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i16m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i16m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i32m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i32m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i32m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i32m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i32m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i32m4_mu(mask, dest, src, offset, vl) +#define 
__riscv_vslidedown_vx_i32m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i32m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i64m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i64m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i64m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i64m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i64m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i64m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_i64m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_i64m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u8m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u8m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u8m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u8m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u8m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u8m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u8m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u8m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u16m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u16m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u16m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u16m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u16m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u16m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u16m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u16m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u32m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u32m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u32m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u32m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u32m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u32m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u32m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u32m8_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u64m1_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u64m1_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u64m2_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u64m2_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u64m4_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u64m4_mu(mask, dest, src, offset, vl) +#define __riscv_vslidedown_vx_u64m8_m(mask, dest, src, offset, vl) __riscv_th_vslidedown_vx_u64m8_mu(mask, dest, src, offset, vl) #define __riscv_vslideup_vx_f16m1(dest, src, offset, vl) __riscv_th_vslideup_vx_f16m1(dest, src, offset, vl) #define __riscv_vslideup_vx_f16m2(dest, src, offset, vl) __riscv_th_vslideup_vx_f16m2(dest, src, offset, vl) #define __riscv_vslideup_vx_f16m4(dest, src, offset, vl) __riscv_th_vslideup_vx_f16m4(dest, src, offset, vl) From 5658498f8ba13d48ed896e4f72c9e31c5641c1e1 Mon Sep 17 00:00:00 2001 From: imkiva Date: Mon, 3 Jun 2024 15:59:40 +0800 Subject: [PATCH 02/12] [Clang][XTHeadVector] fix wrapper tests for vector-floating-conv (TAMU) --- .../vector-floating-conv/wrappers/vfcvt.c | 384 +++++++++--------- .../vector-floating-conv/wrappers/vfncvt.c | 288 ++++++------- .../vector-floating-conv/wrappers/vfwcvt_f.c | 192 ++++----- .../vector-floating-conv/wrappers/vfwcvt_x.c | 96 ++--- 4 files changed, 480 
insertions(+), 480 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfcvt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfcvt.c index 3bc21b8087584..6daeefbe9d136 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfcvt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfcvt.c @@ -488,481 +488,481 @@ vfloat64m8_t test_vfcvt_f_xu_v_f64m8(vuint64m8_t src, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m1_m(mask, src, vl); +vint16m1_t test_vfcvt_x_f_v_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m2_m(mask, src, vl); +vint16m2_t test_vfcvt_x_f_v_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m4_m(mask, src, vl); +vint16m4_t test_vfcvt_x_f_v_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i16m8_m(mask, src, vl); +vint16m8_t test_vfcvt_x_f_v_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i16m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv4i16.nxv4f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m1_m(mask, src, vl); +vuint16m1_t test_vfcvt_xu_f_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv8i16.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m2_m(mask, src, vl); +vuint16m2_t test_vfcvt_xu_f_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv16i16.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m4_m(mask, src, vl); +vuint16m4_t test_vfcvt_xu_f_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u16m8_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv32i16.nxv32f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u16m8_m(mask, src, vl); +vuint16m8_t test_vfcvt_xu_f_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u16m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfcvt_f_x_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint16m1_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint16m2_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint16m4_t src, size_t vl) { + return 
__riscv_vfcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f16m8_m(mask, src, vl); +vfloat16m8_t test_vfcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint16m8_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv4f16.nxv4i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfcvt_f_xu_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint16m1_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv8f16.nxv8i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint16m2_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv16f16.nxv16i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return 
__riscv_vfcvt_f_xu_v_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint16m4_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv32f16.nxv32i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint16m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f16m8_m(mask, src, vl); +vfloat16m8_t test_vfcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint16m8_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m1_m(mask, src, vl); +vint32m1_t test_vfcvt_x_f_v_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m2_m(mask, src, vl); +vint32m2_t test_vfcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m4_m(mask, src, vl); +vint32m4_t test_vfcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i32m8_m(mask, src, vl); +vint32m8_t test_vfcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv2i32.nxv2f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m1_m(mask, src, vl); +vuint32m1_t test_vfcvt_xu_f_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv4i32.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m2_m(mask, src, vl); +vuint32m2_t test_vfcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv8i32.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m4_m(mask, src, vl); +vuint32m4_t test_vfcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv16i32.nxv16f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u32m8_m(mask, src, vl); +vuint32m8_t test_vfcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfcvt_f_x_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint32m1_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint32m2_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint32m4_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint32m8_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv2f32.nxv2i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfcvt_f_xu_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint32m1_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv4f32.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint32m2_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv8f32.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint32m4_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv16f32.nxv16i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint32m8_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m1_m(mask, src, vl); +vint64m1_t test_vfcvt_x_f_v_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m2_m(mask, src, vl); +vint64m2_t test_vfcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vfcvt_x_f_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m4_m(mask, src, vl); +vint64m4_t test_vfcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_x_f_v_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.x.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_x_f_v_i64m8_m(mask, src, vl); +vint64m8_t test_vfcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv1i64.nxv1f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vfloat64m1_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m1_m(mask, src, vl); +vuint64m1_t test_vfcvt_xu_f_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv2i64.nxv2f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m2_m(mask, src, vl); +vuint64m2_t test_vfcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t 
maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv4i64.nxv4f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m4_m(mask, src, vl); +vuint64m4_t test_vfcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_xu_f_v_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.xu.f.v.mask.nxv8i64.nxv8f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfcvt_xu_f_v_u64m8_m(mask, src, vl); +vuint64m8_t test_vfcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m1_m(mask, src, vl); +vfloat64m1_t test_vfcvt_f_x_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vint64m1_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vint64m2_t src, 
size_t vl) { - return __riscv_vfcvt_f_x_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint64m2_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint64m4_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_x_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.x.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_x_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint64m8_t src, size_t vl) { + return __riscv_vfcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv1f64.nxv1i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vuint64m1_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m1_m(mask, src, vl); +vfloat64m1_t test_vfcvt_f_xu_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vuint64m1_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv2f64.nxv2i64.i64( [[MASKEDOFF]], [[SRC]], 
[[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint64m2_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv4f64.nxv4i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint64m4_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfcvt_f_xu_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfcvt.f.xu.v.mask.nxv8f64.nxv8i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return __riscv_vfcvt_f_xu_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint64m8_t src, size_t vl) { + return __riscv_vfcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfncvt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfncvt.c index f437d8f8a47fc..9bef7247f3370 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfncvt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfncvt.c @@ -368,361 +368,361 @@ vfloat32m4_t test_vfncvt_f_f_w_f32m4(vfloat64m8_t src, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m1_m(mask, src, vl); +vint8m1_t 
test_vfncvt_x_f_w_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m2_m(mask, src, vl); +vint8m2_t test_vfncvt_x_f_w_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i8m4_m(mask, src, vl); +vint8m4_t test_vfncvt_x_f_w_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i8m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv8i8.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m1_m(mask, src, vl); +vuint8m1_t test_vfncvt_xu_f_w_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv16i8.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m2_m(mask, src, vl); +vuint8m2_t test_vfncvt_xu_f_w_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv32i8.nxv32f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vfloat16m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u8m4_m(mask, src, vl); +vuint8m4_t test_vfncvt_xu_f_w_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vfloat16m8_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u8m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m1_m(mask, src, vl); +vint16m1_t test_vfncvt_x_f_w_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m2_m(mask, src, vl); +vint16m2_t test_vfncvt_x_f_w_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfncvt.x.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i16m4_m(mask, src, vl); +vint16m4_t test_vfncvt_x_f_w_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv4i16.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m1_m(mask, src, vl); +vuint16m1_t test_vfncvt_xu_f_w_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv8i16.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m2_m(mask, src, vl); +vuint16m2_t test_vfncvt_xu_f_w_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv16i16.nxv16f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u16m4_m(mask, src, vl); +vuint16m4_t test_vfncvt_xu_f_w_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_x_w_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfncvt_f_x_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vint32m2_t src, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_x_w_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfncvt_f_x_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint32m4_t src, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_x_w_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfncvt_f_x_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint32m8_t src, size_t vl) { + return __riscv_vfncvt_f_x_w_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_xu_w_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv4f16.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfncvt_f_xu_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vuint32m2_t src, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_xu_w_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv8f16.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfncvt_f_xu_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint32m4_t src, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_xu_w_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv16f16.nxv16i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vuint32m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfncvt_f_xu_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint32m8_t src, size_t vl) { + return __riscv_vfncvt_f_xu_w_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv4f16.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m1_m(mask, src, vl); +vfloat16m1_t test_vfncvt_f_f_w_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv8f16.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfncvt_f_f_w_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat32m4_t src, size_t vl) { + return 
__riscv_vfncvt_f_f_w_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv16f16.nxv16f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat32m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfncvt_f_f_w_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat32m8_t src, size_t vl) { + return __riscv_vfncvt_f_f_w_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m1_m(mask, src, vl); +vint32m1_t test_vfncvt_x_f_w_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_x_f_w_i32m2_m(mask, src, vl); +vint32m2_t test_vfncvt_x_f_w_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_x_f_w_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.x.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return 
__riscv_vfncvt_x_f_w_i32m4_m(mask, src, vl); +vint32m4_t test_vfncvt_x_f_w_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_x_f_w_i32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv2i32.nxv2f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m1_m(mask, src, vl); +vuint32m1_t test_vfncvt_xu_f_w_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv4i32.nxv4f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m2_m(mask, src, vl); +vuint32m2_t test_vfncvt_xu_f_w_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_xu_f_w_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.xu.f.w.mask.nxv8i32.nxv8f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_xu_f_w_u32m4_m(mask, src, vl); +vuint32m4_t test_vfncvt_xu_f_w_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_xu_f_w_u32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_x_w_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF]], 
[[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfncvt_f_x_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vint64m2_t src, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_x_w_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfncvt_f_x_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint64m4_t src, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_x_w_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.x.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_x_w_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfncvt_f_x_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint64m8_t src, size_t vl) { + return __riscv_vfncvt_f_x_w_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_xu_w_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv2f32.nxv2i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vuint64m2_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfncvt_f_xu_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vuint64m2_t src, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_xu_w_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( 
poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv4f32.nxv4i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vuint64m4_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfncvt_f_xu_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint64m4_t src, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_xu_w_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.xu.w.mask.nxv8f32.nxv8i64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vuint64m8_t src, size_t vl) { - return __riscv_vfncvt_f_xu_w_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfncvt_f_xu_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint64m8_t src, size_t vl) { + return __riscv_vfncvt_f_xu_w_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv2f32.nxv2f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat64m2_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m1_m(mask, src, vl); +vfloat32m1_t test_vfncvt_f_f_w_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat64m2_t src, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m1_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv4f32.nxv4f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat64m4_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfncvt_f_f_w_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat64m4_t src, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfncvt_f_f_w_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfncvt.f.f.w.mask.nxv8f32.nxv8f64.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat64m8_t src, size_t vl) { - return __riscv_vfncvt_f_f_w_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfncvt_f_f_w_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat64m8_t src, size_t vl) { + return __riscv_vfncvt_f_f_w_f32m4_m(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_f.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_f.c index 59394d5ae1015..e10f5e9f168b6 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_f.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_f.c @@ -248,241 +248,241 @@ vfloat64m8_t test_vfwcvt_f_f_v_f64m8(vfloat32m4_t src, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfwcvt_f_x_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vint8m1_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfwcvt_f_x_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vint8m2_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfwcvt.f.x.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f16m8_m(mask, src, vl); +vfloat16m8_t test_vfwcvt_f_x_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vint8m4_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f16m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv8f16.nxv8i8.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vuint8m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m2_m(mask, src, vl); +vfloat16m2_t test_vfwcvt_f_xu_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vuint8m1_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv16f16.nxv16i8.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vuint8m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m4_m(mask, src, vl); +vfloat16m4_t test_vfwcvt_f_xu_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vuint8m2_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv32f16.nxv32i8.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vuint8m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f16m8_m(mask, src, vl); +vfloat16m8_t test_vfwcvt_f_xu_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vuint8m4_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f16m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_x_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vint16m1_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_x_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vint16m2_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_x_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vint16m4_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv4f32.nxv4i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vuint16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_xu_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vuint16m1_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv8f32.nxv8i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vuint16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_xu_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vuint16m2_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv16f32.nxv16i16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vuint16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_xu_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vuint16m4_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m2_m(mask, src, vl); +vfloat32m2_t test_vfwcvt_f_f_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m4_m(mask, src, vl); +vfloat32m4_t test_vfwcvt_f_f_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f32m8_m(mask, src, vl); +vfloat32m8_t test_vfwcvt_f_f_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_f_f_v_f32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_x_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vint32m1_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_x_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vint32m2_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_x_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.x.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_x_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_x_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vint32m4_t src, size_t vl) { + return __riscv_vfwcvt_f_x_v_f64m8_m(mask, maskedoff, src, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv2f64.nxv2i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vuint32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_xu_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vuint32m1_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv4f64.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vuint32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_xu_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vuint32m2_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_xu_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.xu.v.mask.nxv8f64.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vuint32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_xu_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_xu_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vuint32m4_t src, size_t vl) { + return __riscv_vfwcvt_f_xu_v_f64m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m2_m(mask, src, vl); +vfloat64m2_t test_vfwcvt_f_f_v_f64m2_m(vbool32_t mask, 
vfloat64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m4_m(mask, src, vl); +vfloat64m4_t test_vfwcvt_f_f_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_f_f_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.f.f.v.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_f_f_v_f64m8_m(mask, src, vl); +vfloat64m8_t test_vfwcvt_f_f_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_f_f_v_f64m8_m(mask, maskedoff, src, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_x.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_x.c index 0d6b1f29ea7be..37587640fb490 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_x.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating-conv/wrappers/vfwcvt_x.c @@ -128,121 +128,121 @@ vuint64m8_t test_vfwcvt_xu_f_v_u64m8(vfloat32m4_t src, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_x_f_v_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m2_m(mask, src, vl); +vint32m2_t test_vfwcvt_x_f_v_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_x_f_v_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m4_m(mask, src, vl); +vint32m4_t test_vfwcvt_x_f_v_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_x_f_v_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i32m8_m(mask, src, vl); +vint32m8_t test_vfwcvt_x_f_v_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_x_f_v_i32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_xu_f_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv4i32.nxv4f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vfloat16m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m2_m(mask, src, vl); +vuint32m2_t test_vfwcvt_xu_f_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat16m1_t src, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_xu_f_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv8i32.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vfloat16m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m4_m(mask, src, vl); +vuint32m4_t test_vfwcvt_xu_f_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat16m2_t src, size_t vl) { + return 
__riscv_vfwcvt_xu_f_v_u32m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_xu_f_v_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv16i32.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vfloat16m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u32m8_m(mask, src, vl); +vuint32m8_t test_vfwcvt_xu_f_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat16m4_t src, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u32m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_x_f_v_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m2_m(mask, src, vl); +vint64m2_t test_vfwcvt_x_f_v_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_x_f_v_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_x_f_v_i64m4_m(mask, src, vl); +vint64m4_t test_vfwcvt_x_f_v_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_x_f_v_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.x.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return 
__riscv_vfwcvt_x_f_v_i64m8_m(mask, src, vl); +vint64m8_t test_vfwcvt_x_f_v_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_x_f_v_i64m8_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_xu_f_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv2i64.nxv2f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vfloat32m1_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m2_m(mask, src, vl); +vuint64m2_t test_vfwcvt_xu_f_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat32m1_t src, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m2_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_xu_f_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv4i64.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vfloat32m2_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m4_m(mask, src, vl); +vuint64m4_t test_vfwcvt_xu_f_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat32m2_t src, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m4_m(mask, maskedoff, src, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwcvt_xu_f_v_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( poison, [[SRC]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwcvt.xu.f.v.mask.nxv8i64.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vfloat32m4_t src, size_t vl) { - return __riscv_vfwcvt_xu_f_v_u64m8_m(mask, src, vl); +vuint64m8_t test_vfwcvt_xu_f_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat32m4_t src, size_t vl) { + return __riscv_vfwcvt_xu_f_v_u64m8_m(mask, maskedoff, src, vl); } From 290f1dcff2d0bf1b404a1b926aa82a4cf12c556e Mon Sep 17 00:00:00 2001 From: imkiva Date: Mon, 3 Jun 2024 16:22:18 +0800 Subject: [PATCH 03/12] [Clang][XTHeadVector] fix wrapper tests for vector-integer-compare (TAMU) --- .../vector-integer-compare/wrappers/vmseq.c | 512 ++++++++--------- .../vector-integer-compare/wrappers/vmsge.c | 256 ++++----- .../vector-integer-compare/wrappers/vmsgeu.c | 257 +++++---- .../vector-integer-compare/wrappers/vmsgt.c | 256 ++++----- .../vector-integer-compare/wrappers/vmsgtu.c | 256 ++++----- 
.../vector-integer-compare/wrappers/vmslt.c | 256 ++++----- .../vector-integer-compare/wrappers/vmsltu.c | 256 ++++----- .../vector-integer-compare/wrappers/vmsne.c | 513 +++++++++--------- 8 files changed, 1280 insertions(+), 1282 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmseq.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmseq.c index 929137f4b0d7c..6e3a2cc74c7d8 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmseq.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmseq.c @@ -647,642 +647,642 @@ vbool8_t test_vmseq_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vmseq_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vmseq_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i8m2_b4_m(vbool4_t mask, 
vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vmseq_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vmseq_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vmseq_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vmseq_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmseq_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vmseq_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmseq_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmseq_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vmseq_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vmseq_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vmseq_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vmseq_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vmseq_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vmseq_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, 
int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vmseq_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vmseq_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmseq_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vmseq_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vmseq_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vmseq_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vmseq_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vmseq_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return 
__riscv_vmseq_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vmseq_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vmseq_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmseq_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vmseq_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vmseq_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vmseq_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vmseq_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vmseq_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vmseq_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmseq_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vmseq_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmseq_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vmseq_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t 
op2, size_t vl) { - return __riscv_vmseq_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vmseq_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vmseq_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vmseq_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vmseq_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmseq.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vmseq_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vmseq_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vmseq_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmseq_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vmseq_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vmseq_vx_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmseq_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmseq_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vmseq_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vmseq_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vmseq_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vmseq_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vmseq_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vmseq_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vmseq_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vmseq_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmseq_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmseq_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vmseq_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vmseq_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m1_b32_m(mask, op1, op2, 
vl); +vbool32_t test_vmseq_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vmseq_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vmseq_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vmseq_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vmseq_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmseq.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vmseq_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vmseq_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmseq_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmseq_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return __riscv_vmseq_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vmseq_vv_u64m1_b64_m(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmseq_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vmseq_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vmseq_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmseq_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vmseq_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vmseq_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmseq_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vmseq_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vv_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmseq_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vmseq_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmseq_vx_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmseq.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmseq_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmseq_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vmseq_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsge.c 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsge.c index 183fb7a7c74ea..c8cdf7baf6d60 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsge.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsge.c @@ -327,322 +327,322 @@ vbool8_t test_vmsge_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vmsge_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vmsge_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vmsge_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vmsge_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vmsge_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vmsge_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsge_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t 
op1, vint8m8_t op2, size_t vl) { + return __riscv_vmsge_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsge_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsge_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vmsge_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vmsge_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vmsge_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsge.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vmsge_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vmsge_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vmsge_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vmsge_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vmsge_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsge_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsge_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vmsge_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vmsge_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t 
mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vmsge_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vmsge_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vmsge_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vmsge_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vmsge_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vmsge_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsge_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsge_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vmsge_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t 
vl) { + return __riscv_vmsge_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsge_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vmsge_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vmsge_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsge_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vmsge_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsge.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vmsge_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsge_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vmsge_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vv_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsge_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vmsge_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsge_vx_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsge.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsge_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsge_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vmsge_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgeu.c 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgeu.c index 8e09795f7d276..19b6f871527d3 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgeu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgeu.c @@ -327,322 +327,321 @@ vbool8_t test_vmsgeu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
- return __riscv_vmsgeu_vx_u8m2_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmsgeu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
+ return __riscv_vmsgeu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u8m4_b2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
- return __riscv_vmsgeu_vv_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgeu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+ return __riscv_vmsgeu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u8m4_b2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
- return __riscv_vmsgeu_vx_u8m4_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmsgeu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
+ return __riscv_vmsgeu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u8m8_b1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
- return __riscv_vmsgeu_vv_u8m8_b1_m(mask, op1, op2, vl);
+vbool1_t test_vmsgeu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsgeu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsgeu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + 
return __riscv_vmsgeu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgeu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgeu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u32m4_b8_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgeu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgeu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgeu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgeu_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgeu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vv_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgeu_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vmsgeu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgeu_vx_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgeu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return 
__riscv_vmsgeu_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgeu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgeu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } - diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgt.c index ce4b06a31a868..4b8e955e63121 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgt.c @@ -327,322 +327,322 @@ vbool8_t test_vmsgt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vmsgt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vmsgt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t 
maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vmsgt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vmsgt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vmsgt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vmsgt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsgt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsgt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vmsgt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsgt_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsgt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vmsgt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vmsgt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vmsgt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vmsgt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vmsgt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vmsgt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, 
int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vmsgt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vmsgt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsgt_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vmsgt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vmsgt_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vmsgt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vmsgt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vmsgt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return 
__riscv_vmsgt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vmsgt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vmsgt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmsgt_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vmsgt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsgt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vmsgt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vmsgt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vmsgt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vmsgt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vmsgt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsgt_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vmsgt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vv_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsgt_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vmsgt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgt_vx_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgt.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t 
op2, size_t vl) { - return __riscv_vmsgt_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vmsgt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgtu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgtu.c index 61d027f7b3ea1..537745b9eaea5 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgtu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsgtu.c @@ -327,322 +327,322 @@ vbool8_t test_vmsgtu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8m2_b4_m(mask, op1, op2, vl); 
+vbool4_t test_vmsgtu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv64i8.nxv64i8.i64( poison, 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsgtu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsgtu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); 
} // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], 
[[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsgtu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, 
vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsgtu_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsgtu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsgtu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t 
test_vmsgtu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsgtu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vv_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsgtu_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vmsgtu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsgtu_vx_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsgtu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsgtu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsgtu_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsgtu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsgtu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmslt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmslt.c index 0e814c3b70321..aae3d42e7fc44 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmslt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmslt.c @@ -327,322 +327,322 @@ vbool8_t test_vmslt_vx_i64m8_b8(vint64m8_t op1, int64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmslt_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vmslt_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmslt_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vmslt_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmslt_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmslt_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vmslt_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmslt_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmslt_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vmslt_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmslt_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmslt_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vmslt_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vmslt_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmslt_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vmslt_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmslt_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmslt_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vmslt_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmslt_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmslt_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vmslt_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmslt_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmslt_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vmslt_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - 
return __riscv_vmslt_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmslt_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vmslt_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmslt_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vmslt_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmslt_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vmslt_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmslt_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmslt_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vmslt_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmslt_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmslt_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vmslt_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmslt_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmslt_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vmslt_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmslt_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmslt_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vmslt_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmslt_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vmslt_vv_i32m1_b32_m(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmslt_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vmslt_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmslt_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vmslt_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmslt_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vmslt_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i32.nxv8i32.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vmslt_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vmslt_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmslt_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmslt_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vmslt_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vmslt_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmslt_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vmslt_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmslt_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vmslt_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmslt_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vmslt_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmslt_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vmslt_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return 
__riscv_vmslt_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmslt_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vmslt_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmslt_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vmslt_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmslt_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vmslt_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vv_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmslt_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vmslt_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmslt_vx_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmslt.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmslt.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmslt_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmslt_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vmslt_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsltu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsltu.c index 538f9b447913d..1d29840409c5e 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsltu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsltu.c @@ -327,322 +327,322 @@ vbool8_t test_vmsltu_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vmsltu_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsltu_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsltu_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vmsltu_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsltu_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsltu_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsltu_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vmsltu_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsltu_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsltu_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vmsltu_vv_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsltu_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vmsltu_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsltu_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsltu_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsltu_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsltu_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vmsltu_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsltu_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsltu_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vmsltu_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsltu_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsltu_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vmsltu_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsltu_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsltu_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsltu_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vmsltu_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsltu_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsltu_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsltu_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return 
__riscv_vmsltu_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsltu_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vmsltu_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsltu_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsltu_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsltu_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vmsltu_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsltu_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsltu_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vmsltu_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsltu_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsltu_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vmsltu_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsltu_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsltu_vx_u32m8_b4_m(vbool4_t mask, 
vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsltu_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsltu_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vmsltu_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsltu_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsltu_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsltu_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vmsltu_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 
[[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsltu_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsltu_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsltu_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vmsltu_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vx_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsltu_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsltu_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsltu_vv_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsltu_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vmsltu_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vmsltu_vx_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsltu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsltu_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsltu_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsltu_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsne.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsne.c index f3536bb7e653e..f54a2ccb1fa3c 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsne.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-integer-compare/wrappers/vmsne.c @@ -647,642 +647,641 @@ vbool8_t test_vmsne_vx_u64m8_b8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vmsne_vv_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vmsne_vx_i8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vmsne_vv_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vmsne_vx_i8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vmsne_vv_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vmsne_vx_i8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_i8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vmsne_vx_i8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vv_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vmsne_vv_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vmsne_vx_i8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vx_i8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vmsne_vx_i8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vmsne_vv_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsne.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vmsne_vx_i16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vmsne_vv_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vmsne_vx_i16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vmsne_vv_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vmsne_vx_i16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vmsne_vv_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vmsne_vx_i16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_i16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vmsne_vx_i16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) 
// CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vmsne_vv_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vmsne_vx_i32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vmsne_vv_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vmsne_vx_i32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vmsne_vv_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vmsne_vx_i32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vmsne_vx_i32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vmsne_vv_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return 
__riscv_vmsne_vx_i32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_i32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vmsne_vx_i32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vmsne_vv_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_i64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vmsne_vx_i64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vmsne_vv_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmsne.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_i64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vmsne_vx_i64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vmsne_vv_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_i64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vmsne_vx_i64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vmsne_vv_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_i64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vmsne_vx_i64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_i64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vmsne_vx_i64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vmsne_vv_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u8m1_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m1_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u8m1_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsne_vx_u8m1_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t 
test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vmsne_vv_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u8m2_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m2_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u8m2_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsne_vx_u8m2_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vmsne_vv_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u8m4_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m4_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_u8m4_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsne_vx_u8m4_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vv_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vmsne_vv_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u8m8_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vmsne_vx_u8m8_b1_m(mask, op1, op2, vl); +vbool1_t test_vmsne_vx_u8m8_b1_m(vbool1_t mask, vbool1_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vmsne_vx_u8m8_b1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vmsne_vv_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint16m1_t op1, 
uint16_t op2, size_t vl) { + return __riscv_vmsne_vx_u16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vmsne_vv_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsne_vx_u16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vmsne_vv_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsne_vx_u16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vv_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vmsne_vv_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vmsne_vx_u16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmsne_vx_u16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vmsne_vx_u16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vmsne_vv_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vx_u32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsne_vx_u32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vmsne_vv_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsne_vx_u32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vmsne_vv_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsne_vx_u32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vv_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vmsne_vv_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vmsne_vx_u32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmsne_vx_u32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return __riscv_vmsne_vx_u32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vv_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vmsne_vv_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmsne_vx_u64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsne_vx_u64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmsne_vv_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vmsne_vv_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m2_b32_m(mask, op1, op2, vl); +vbool32_t 
test_vmsne_vx_u64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsne_vx_u64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vv_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vmsne_vv_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmsne_vx_u64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsne_vx_u64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vv_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vmsne_vv_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vv_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vmsne_vv_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsne_vx_u64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.i64.i64( poison, 
[[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsne.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vmsne_vx_u64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmsne_vx_u64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vmsne_vx_u64m8_b8_m(mask, maskedoff, op1, op2, vl); } - From 3415b29d7c78b6c02c0634e05c4de79dedf354d4 Mon Sep 17 00:00:00 2001 From: imkiva Date: Mon, 3 Jun 2024 16:36:17 +0800 Subject: [PATCH 04/12] [Clang][XTHeadVector] fix wrapper tests for vector-mask-logical (TAMU) --- .../vector-mask-logical/wrappers/vid.c | 129 +++++++++--------- .../vector-mask-logical/wrappers/viota.c | 128 ++++++++--------- .../vector-mask-logical/wrappers/vmsbf.c | 32 ++--- .../vector-mask-logical/wrappers/vmsif.c | 32 ++--- .../vector-mask-logical/wrappers/vmsof.c | 32 ++--- 5 files changed, 177 insertions(+), 176 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vid.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vid.c index b7bb255be36bb..4c0bb05a3f6e1 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vid.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vid.c @@ -167,162 +167,163 @@ vuint64m8_t test_vid_v_u64m8(size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i8.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, size_t vl) { - return __riscv_vid_v_u8m1_m(mask, vl); +vuint8m1_t test_vid_v_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, size_t vl) { + return __riscv_vid_v_u8m1_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv16i8.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv16i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, size_t vl) { - return __riscv_vid_v_u8m2_m(mask, vl); +vuint8m2_t test_vid_v_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, size_t vl) { + return __riscv_vid_v_u8m2_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv32i8.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv32i8.i64( 
[[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, size_t vl) { - return __riscv_vid_v_u8m4_m(mask, vl); +vuint8m4_t test_vid_v_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, size_t vl) { + return __riscv_vid_v_u8m4_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv64i8.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv64i8.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, size_t vl) { - return __riscv_vid_v_u8m8_m(mask, vl); +vuint8m8_t test_vid_v_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, size_t vl) { + return __riscv_vid_v_u8m8_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv4i16.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv4i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, size_t vl) { - return __riscv_vid_v_u16m1_m(mask, vl); +vuint16m1_t test_vid_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, size_t vl) { + return __riscv_vid_v_u16m1_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i16.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, size_t vl) { - return __riscv_vid_v_u16m2_m(mask, vl); +vuint16m2_t test_vid_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, size_t vl) { + return __riscv_vid_v_u16m2_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv16i16.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv16i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, size_t vl) { - return __riscv_vid_v_u16m4_m(mask, vl); +vuint16m4_t test_vid_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, size_t vl) { + return __riscv_vid_v_u16m4_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv32i16.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv32i16.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, size_t vl) { - return __riscv_vid_v_u16m8_m(mask, vl); +vuint16m8_t test_vid_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, size_t vl) { + return __riscv_vid_v_u16m8_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv2i32.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv2i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, size_t vl) { - return __riscv_vid_v_u32m1_m(mask, vl); +vuint32m1_t test_vid_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, size_t vl) { + return __riscv_vid_v_u32m1_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv4i32.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv4i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, size_t vl) { - return __riscv_vid_v_u32m2_m(mask, vl); +vuint32m2_t test_vid_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, size_t vl) { + return __riscv_vid_v_u32m2_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i32.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, size_t vl) { - return __riscv_vid_v_u32m4_m(mask, vl); +vuint32m4_t test_vid_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, size_t vl) { + return __riscv_vid_v_u32m4_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv16i32.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv16i32.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, size_t vl) { - return __riscv_vid_v_u32m8_m(mask, vl); +vuint32m8_t test_vid_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, size_t vl) { + return __riscv_vid_v_u32m8_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vid_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv1i64.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv1i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, size_t vl) { - return __riscv_vid_v_u64m1_m(mask, vl); +vuint64m1_t test_vid_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, size_t vl) { + return __riscv_vid_v_u64m1_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv2i64.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv2i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, size_t vl) { - return __riscv_vid_v_u64m2_m(mask, vl); +vuint64m2_t test_vid_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, size_t vl) { + return __riscv_vid_v_u64m2_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv4i64.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv4i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, size_t vl) { - return __riscv_vid_v_u64m4_m(mask, vl); +vuint64m4_t test_vid_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, size_t vl) { + return __riscv_vid_v_u64m4_m(mask, maskedoff, vl); } // CHECK-RV64-LABEL: define dso_local @test_vid_v_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i64.i64( poison, [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vid.mask.nxv8i64.i64( [[MASKEDOFF]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, size_t vl) { - return __riscv_vid_v_u64m8_m(mask, vl); +vuint64m8_t test_vid_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, size_t vl) { + return __riscv_vid_v_u64m8_m(mask, maskedoff, vl); } + diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/viota.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/viota.c index 6a8f4b9c4fa12..c1af1e4b6c655 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/viota.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/viota.c @@ -167,162 +167,162 @@ vuint64m8_t test_viota_m_u64m8(vbool8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local 
@test_viota_m_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i8.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u8m1_m(mask, op1, vl); +vuint8m1_t test_viota_m_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_m_u8m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv16i8.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u8m2_m(mask, op1, vl); +vuint8m2_t test_viota_m_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_viota_m_u8m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv32i8.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u8m4_m(mask, op1, vl); +vuint8m4_t test_viota_m_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_viota_m_u8m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv64i8.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_viota_m_u8m8_m(mask, op1, vl); +vuint8m8_t test_viota_m_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vbool1_t op1, size_t vl) { + return __riscv_viota_m_u8m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv4i16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u16m1_m(mask, op1, vl); +vuint16m1_t test_viota_m_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_m_u16m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u16m2_m(mask, op1, vl); +vuint16m2_t test_viota_m_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_m_u16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv16i16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u16m4_m(mask, op1, vl); +vuint16m4_t test_viota_m_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_viota_m_u16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv32i16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_viota_m_u16m8_m(mask, op1, vl); +vuint16m8_t test_viota_m_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_viota_m_u16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv2i32.i64( poison, [[OP1]], [[MASK]], 
i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u32m1_m(mask, op1, vl); +vuint32m1_t test_viota_m_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vbool32_t op1, size_t vl) { + return __riscv_viota_m_u32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv4i32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u32m2_m(mask, op1, vl); +vuint32m2_t test_viota_m_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_m_u32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u32m4_m(mask, op1, vl); +vuint32m4_t test_viota_m_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_m_u32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv16i32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_viota_m_u32m8_m(mask, op1, vl); +vuint32m8_t test_viota_m_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_viota_m_u32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv1i64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vbool64_t op1, size_t vl) { - return __riscv_viota_m_u64m1_m(mask, op1, vl); +vuint64m1_t test_viota_m_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vbool64_t op1, size_t vl) { + return __riscv_viota_m_u64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv2i64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vbool32_t op1, size_t vl) { - return __riscv_viota_m_u64m2_m(mask, op1, vl); +vuint64m2_t test_viota_m_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vbool32_t op1, size_t vl) { + return __riscv_viota_m_u64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv4i64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vbool16_t op1, size_t vl) { - return __riscv_viota_m_u64m4_m(mask, op1, vl); +vuint64m4_t test_viota_m_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vbool16_t op1, size_t vl) { + return __riscv_viota_m_u64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_viota_m_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.viota.mask.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_viota_m_u64m8_m(mask, op1, vl); +vuint64m8_t test_viota_m_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_viota_m_u64m8_m(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsbf.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsbf.c index a7d66e4c394d4..630a72af9438c 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsbf.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsbf.c @@ -47,41 +47,41 @@ vbool8_t test_vmsbf_m_b8(vbool8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsbf_m_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv64i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv64i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsbf_m_b1_m(mask, op1, vl); +vbool1_t test_vmsbf_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { + return __riscv_vmsbf_m_b1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsbf_m_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv32i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv32i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsbf_m_b2_m(mask, op1, vl); +vbool2_t test_vmsbf_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_vmsbf_m_b2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsbf_m_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv16i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv16i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsbf_m_b4_m(mask, op1, vl); +vbool4_t test_vmsbf_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_vmsbf_m_b4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsbf_m_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv8i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsbf.mask.nxv8i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vmsbf_m_b8_m(mask, op1, vl); +vbool8_t test_vmsbf_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_vmsbf_m_b8_m(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsif.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsif.c index 2a0e898021baf..163c7c78a5544 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsif.c +++ 
b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsif.c @@ -47,41 +47,41 @@ vbool8_t test_vmsif_m_b8(vbool8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsif_m_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv64i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv64i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsif_m_b1_m(mask, op1, vl); +vbool1_t test_vmsif_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { + return __riscv_vmsif_m_b1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsif_m_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv32i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv32i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsif_m_b2_m(mask, op1, vl); +vbool2_t test_vmsif_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_vmsif_m_b2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsif_m_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv16i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv16i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsif_m_b4_m(mask, op1, vl); +vbool4_t test_vmsif_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_vmsif_m_b4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsif_m_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv8i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsif.mask.nxv8i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) { - return __riscv_vmsif_m_b8_m(mask, op1, vl); +vbool8_t test_vmsif_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) { + return __riscv_vmsif_m_b8_m(mask, maskedoff, op1, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsof.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsof.c index bc7e25f7972c9..84ab28bf64907 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsof.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-mask-logical/wrappers/vmsof.c @@ -47,41 +47,41 @@ vbool8_t test_vmsof_m_b8(vbool8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmsof_m_b1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv64i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv64i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t op1, size_t vl) { - return __riscv_vmsof_m_b1_m(mask, op1, vl); +vbool1_t test_vmsof_m_b1_m(vbool1_t mask, vbool1_t maskedoff, vbool1_t op1, size_t vl) { + return __riscv_vmsof_m_b1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsof_m_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv32i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv32i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t op1, size_t vl) { - return __riscv_vmsof_m_b2_m(mask, op1, vl); +vbool2_t test_vmsof_m_b2_m(vbool2_t mask, vbool2_t maskedoff, vbool2_t op1, size_t vl) { + return __riscv_vmsof_m_b2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsof_m_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv16i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv16i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t op1, size_t vl) { - return __riscv_vmsof_m_b4_m(mask, op1, vl); +vbool4_t test_vmsof_m_b4_m(vbool4_t mask, vbool4_t maskedoff, vbool4_t op1, size_t vl) { + return __riscv_vmsof_m_b4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmsof_m_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv8i1.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmsof.mask.nxv8i1.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmsof_m_b8_m(vbool8_t mask, vbool8_t op1, size_t vl) {
- return __riscv_vmsof_m_b8_m(mask, op1, vl);
+vbool8_t test_vmsof_m_b8_m(vbool8_t mask, vbool8_t maskedoff, vbool8_t op1, size_t vl) {
+ return __riscv_vmsof_m_b8_m(mask, maskedoff, op1, vl);
 }

From d436838441d07336af54036fda2af770fdf2befe Mon Sep 17 00:00:00 2001
From: imkiva
Date: Mon, 3 Jun 2024 17:41:31 +0800
Subject: [PATCH 05/12] [Clang][XTHeadVector] fix wrapper tests for vector-permutation (TAMU)

---
 .../vector-permutation/wrappers/vrgather.c | 704 +++++++++---------
 .../vector-permutation/wrappers/vslide1down.c | 256 +++----
 .../vector-permutation/wrappers/vslide1up.c | 256 +++----
 .../vector-permutation/wrappers/vslidedown.c | 352 ++++-----
 4 files changed, 784 insertions(+), 784 deletions(-)

diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vrgather.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vrgather.c
index c2990c57f67e4..4c0239fde18c7 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vrgather.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vrgather.c
@@ -888,882 +888,882 @@ vuint64m8_t test_vrgather_vx_u64m8(vuint64m8_t op1, size_t index, size_t vl) {
 }
 // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4f16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
- return __riscv_vrgather_vv_f16m1_m(mask, op1, index, vl);
+vfloat16m1_t test_vrgather_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vuint16m1_t index, size_t vl) {
+ return __riscv_vrgather_vv_f16m1_m(mask, maskedoff, op1, index, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4f16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t index, size_t vl) {
- return __riscv_vrgather_vx_f16m1_m(mask, op1, index, vl);
+vfloat16m1_t test_vrgather_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t index, size_t vl) {
+ return __riscv_vrgather_vx_f16m1_m(mask, maskedoff, op1, index, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]],
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8f16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_f16m2_m(mask, op1, index, vl); +vfloat16m2_t test_vrgather_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_f16m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8f16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m2_m(mask, op1, index, vl); +vfloat16m2_t test_vrgather_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f16m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16f16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_f16m4_m(mask, op1, index, vl); +vfloat16m4_t test_vrgather_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_f16m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16f16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t index, 
size_t vl) { - return __riscv_vrgather_vx_f16m4_m(mask, op1, index, vl); +vfloat16m4_t test_vrgather_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f16m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32f16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_f16m8_m(mask, op1, index, vl); +vfloat16m8_t test_vrgather_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_f16m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32f16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f16m8_m(mask, op1, index, vl); +vfloat16m8_t test_vrgather_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f16m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2f32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_f32m1_m(mask, op1, index, vl); +vfloat32m1_t test_vrgather_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vuint32m1_t index, size_t vl) { + return __riscv_vrgather_vv_f32m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2f32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m1_m(mask, op1, index, vl); +vfloat32m1_t test_vrgather_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f32m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4f32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_f32m2_m(mask, op1, index, vl); +vfloat32m2_t test_vrgather_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vuint32m2_t index, size_t vl) { + return __riscv_vrgather_vv_f32m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4f32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m2_m(mask, op1, index, vl); +vfloat32m2_t test_vrgather_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f32m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8f32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_f32m4_m(mask, op1, index, vl); 
+vfloat32m4_t test_vrgather_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vuint32m4_t index, size_t vl) { + return __riscv_vrgather_vv_f32m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8f32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m4_m(mask, op1, index, vl); +vfloat32m4_t test_vrgather_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f32m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16f32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_f32m8_m(mask, op1, index, vl); +vfloat32m8_t test_vrgather_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vuint32m8_t index, size_t vl) { + return __riscv_vrgather_vv_f32m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16f32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f32m8_m(mask, op1, index, vl); +vfloat32m8_t test_vrgather_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f32m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv1f64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_f64m1_m(mask, op1, index, vl); +vfloat64m1_t test_vrgather_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vuint64m1_t index, size_t vl) { + return __riscv_vrgather_vv_f64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv1f64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m1_m(mask, op1, index, vl); +vfloat64m1_t test_vrgather_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2f64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_f64m2_m(mask, op1, index, vl); +vfloat64m2_t test_vrgather_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vuint64m2_t index, size_t vl) { + return __riscv_vrgather_vv_f64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2f64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m2_m(mask, op1, index, vl); +vfloat64m2_t 
test_vrgather_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4f64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_f64m4_m(mask, op1, index, vl); +vfloat64m4_t test_vrgather_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vuint64m4_t index, size_t vl) { + return __riscv_vrgather_vv_f64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4f64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m4_m(mask, op1, index, vl); +vfloat64m4_t test_vrgather_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8f64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_f64m8_m(mask, op1, index, vl); +vfloat64m8_t test_vrgather_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vuint64m8_t index, size_t vl) { + return __riscv_vrgather_vv_f64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8f64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_f64m8_m(mask, op1, index, vl); +vfloat64m8_t test_vrgather_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_f64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_i8m1_m(mask, op1, index, vl); +vint8m1_t test_vrgather_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vuint8m1_t index, size_t vl) { + return __riscv_vrgather_vv_i8m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m1_m(mask, op1, index, vl); +vint8m1_t test_vrgather_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i8m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_i8m2_m(mask, op1, index, vl); +vint8m2_t test_vrgather_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vuint8m2_t index, size_t vl) { + return 
__riscv_vrgather_vv_i8m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m2_m(mask, op1, index, vl); +vint8m2_t test_vrgather_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i8m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_i8m4_m(mask, op1, index, vl); +vint8m4_t test_vrgather_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vuint8m4_t index, size_t vl) { + return __riscv_vrgather_vv_i8m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m4_m(mask, op1, index, vl); +vint8m4_t test_vrgather_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i8m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv64i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_i8m8_m(mask, op1, index, vl); +vint8m8_t test_vrgather_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vuint8m8_t index, size_t vl) { + return __riscv_vrgather_vv_i8m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv64i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i8m8_m(mask, op1, index, vl); +vint8m8_t test_vrgather_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i8m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_i16m1_m(mask, op1, index, vl); +vint16m1_t test_vrgather_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_vv_i16m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m1_m(mask, op1, index, vl); +vint16m1_t test_vrgather_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i16m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i16m2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_i16m2_m(mask, op1, index, vl); +vint16m2_t test_vrgather_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_i16m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m2_m(mask, op1, index, vl); +vint16m2_t test_vrgather_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i16m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_i16m4_m(mask, op1, index, vl); +vint16m4_t test_vrgather_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_i16m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m4_m(mask, op1, index, vl); +vint16m4_t test_vrgather_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i16m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_i16m8_m(mask, op1, index, vl); +vint16m8_t test_vrgather_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_i16m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i16m8_m(mask, op1, index, vl); +vint16m8_t test_vrgather_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i16m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_i32m1_m(mask, op1, index, vl); +vint32m1_t test_vrgather_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vuint32m1_t index, size_t vl) { + return __riscv_vrgather_vv_i32m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m1_m(mask, op1, index, vl); +vint32m1_t test_vrgather_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i32m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_i32m2_m(mask, op1, index, vl); +vint32m2_t test_vrgather_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vuint32m2_t index, size_t vl) { + return __riscv_vrgather_vv_i32m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m2_m(mask, op1, index, vl); +vint32m2_t test_vrgather_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i32m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vuint32m4_t index, size_t vl) { - return 
__riscv_vrgather_vv_i32m4_m(mask, op1, index, vl); +vint32m4_t test_vrgather_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vuint32m4_t index, size_t vl) { + return __riscv_vrgather_vv_i32m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m4_m(mask, op1, index, vl); +vint32m4_t test_vrgather_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i32m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_i32m8_m(mask, op1, index, vl); +vint32m8_t test_vrgather_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vuint32m8_t index, size_t vl) { + return __riscv_vrgather_vv_i32m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i32m8_m(mask, op1, index, vl); +vint32m8_t test_vrgather_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i32m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv1i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_i64m1_m(mask, op1, index, vl); +vint64m1_t test_vrgather_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vuint64m1_t index, size_t vl) { + return __riscv_vrgather_vv_i64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv1i64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m1_m(mask, op1, index, vl); +vint64m1_t test_vrgather_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_i64m2_m(mask, op1, index, vl); +vint64m2_t test_vrgather_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vuint64m2_t index, size_t vl) { + return __riscv_vrgather_vv_i64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m2_m(mask, op1, index, vl); +vint64m2_t test_vrgather_vx_i64m2_m(vbool32_t mask, vint64m2_t 
maskedoff, vint64m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_i64m4_m(mask, op1, index, vl); +vint64m4_t test_vrgather_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vuint64m4_t index, size_t vl) { + return __riscv_vrgather_vv_i64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m4_m(mask, op1, index, vl); +vint64m4_t test_vrgather_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_i64m8_m(mask, op1, index, vl); +vint64m8_t test_vrgather_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vuint64m8_t index, size_t vl) { + return __riscv_vrgather_vv_i64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i64.i64( poison, [[OP1]], i64 
[[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_i64m8_m(mask, op1, index, vl); +vint64m8_t test_vrgather_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_i64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t index, size_t vl) { - return __riscv_vrgather_vv_u8m1_m(mask, op1, index, vl); +vuint8m1_t test_vrgather_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t index, size_t vl) { + return __riscv_vrgather_vv_u8m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m1_m(mask, op1, index, vl); +vuint8m1_t test_vrgather_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u8m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t index, size_t vl) { - return __riscv_vrgather_vv_u8m2_m(mask, op1, index, vl); +vuint8m2_t test_vrgather_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t index, size_t vl) { + return __riscv_vrgather_vv_u8m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vrgather_vx_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m2_m(mask, op1, index, vl); +vuint8m2_t test_vrgather_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u8m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t index, size_t vl) { - return __riscv_vrgather_vv_u8m4_m(mask, op1, index, vl); +vuint8m4_t test_vrgather_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t index, size_t vl) { + return __riscv_vrgather_vv_u8m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m4_m(mask, op1, index, vl); +vuint8m4_t test_vrgather_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u8m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv64i8.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t index, size_t vl) { - return __riscv_vrgather_vv_u8m8_m(mask, op1, index, vl); +vuint8m8_t test_vrgather_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t index, size_t vl) { + return __riscv_vrgather_vv_u8m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv64i8.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u8m8_m(mask, op1, index, vl); +vuint8m8_t test_vrgather_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u8m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t index, size_t vl) { - return __riscv_vrgather_vv_u16m1_m(mask, op1, index, vl); +vuint16m1_t test_vrgather_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t index, size_t vl) { + return __riscv_vrgather_vv_u16m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m1_m(mask, op1, index, vl); +vuint16m1_t test_vrgather_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u16m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t index, size_t vl) { - return __riscv_vrgather_vv_u16m2_m(mask, op1, index, vl); +vuint16m2_t test_vrgather_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t index, size_t vl) { + return __riscv_vrgather_vv_u16m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m2_m(mask, op1, index, vl); +vuint16m2_t test_vrgather_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u16m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t index, size_t vl) { - return __riscv_vrgather_vv_u16m4_m(mask, op1, index, vl); +vuint16m4_t test_vrgather_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t index, size_t vl) { + return __riscv_vrgather_vv_u16m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, 
size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m4_m(mask, op1, index, vl); +vuint16m4_t test_vrgather_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u16m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i16.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t index, size_t vl) { - return __riscv_vrgather_vv_u16m8_m(mask, op1, index, vl); +vuint16m8_t test_vrgather_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t index, size_t vl) { + return __riscv_vrgather_vv_u16m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i16.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u16m8_m(mask, op1, index, vl); +vuint16m8_t test_vrgather_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u16m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t index, size_t vl) { - return __riscv_vrgather_vv_u32m1_m(mask, op1, index, vl); +vuint32m1_t test_vrgather_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t index, size_t vl) { + return __riscv_vrgather_vv_u32m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m1_m(mask, op1, index, vl); +vuint32m1_t test_vrgather_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u32m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t index, size_t vl) { - return __riscv_vrgather_vv_u32m2_m(mask, op1, index, vl); +vuint32m2_t test_vrgather_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t index, size_t vl) { + return __riscv_vrgather_vv_u32m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m2_m(mask, op1, index, vl); +vuint32m2_t test_vrgather_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u32m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t index, size_t vl) { - return __riscv_vrgather_vv_u32m4_m(mask, op1, index, vl); +vuint32m4_t 
test_vrgather_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t index, size_t vl) { + return __riscv_vrgather_vv_u32m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m4_m(mask, op1, index, vl); +vuint32m4_t test_vrgather_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u32m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i32.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t index, size_t vl) { - return __riscv_vrgather_vv_u32m8_m(mask, op1, index, vl); +vuint32m8_t test_vrgather_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t index, size_t vl) { + return __riscv_vrgather_vv_u32m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i32.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u32m8_m(mask, op1, index, vl); +vuint32m8_t test_vrgather_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u32m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vrgather.vv.mask.nxv1i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t index, size_t vl) { - return __riscv_vrgather_vv_u64m1_m(mask, op1, index, vl); +vuint64m1_t test_vrgather_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t index, size_t vl) { + return __riscv_vrgather_vv_u64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv1i64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m1_m(mask, op1, index, vl); +vuint64m1_t test_vrgather_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u64m1_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t index, size_t vl) { - return __riscv_vrgather_vv_u64m2_m(mask, op1, index, vl); +vuint64m2_t test_vrgather_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t index, size_t vl) { + return __riscv_vrgather_vv_u64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m2_m(mask, op1, index, vl); +vuint64m2_t test_vrgather_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, 
size_t index, size_t vl) { + return __riscv_vrgather_vx_u64m2_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t index, size_t vl) { - return __riscv_vrgather_vv_u64m4_m(mask, op1, index, vl); +vuint64m4_t test_vrgather_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t index, size_t vl) { + return __riscv_vrgather_vv_u64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i64.i64( poison, [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m4_m(mask, op1, index, vl); +vuint64m4_t test_vrgather_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u64m4_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vv_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i64.i64( poison, [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vv.mask.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t index, size_t vl) { - return __riscv_vrgather_vv_u64m8_m(mask, op1, index, vl); +vuint64m8_t test_vrgather_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t index, size_t vl) { + return __riscv_vrgather_vv_u64m8_m(mask, maskedoff, op1, index, vl); } // CHECK-RV64-LABEL: define dso_local @test_vrgather_vx_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[INDEX:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i64.i64( poison, [[OP1]], i64 [[INDEX]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vrgather.vx.mask.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[INDEX]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, size_t index, size_t vl) { - return __riscv_vrgather_vx_u64m8_m(mask, op1, index, vl); +vuint64m8_t test_vrgather_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, size_t index, size_t vl) { + return __riscv_vrgather_vx_u64m8_m(mask, maskedoff, op1, index, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1down.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1down.c index 64362b73828fb..b872034d3ca62 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1down.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1down.c @@ -327,322 +327,322 @@ vuint64m8_t test_vslide1down_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m1_m(mask, src, value, vl); +vint8m1_t test_vslide1down_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { + return __riscv_vslide1down_vx_i8m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m2_m(mask, src, value, vl); +vint8m2_t test_vslide1down_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { + return __riscv_vslide1down_vx_i8m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m4_m(mask, src, value, vl); +vint8m4_t test_vslide1down_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { + return __riscv_vslide1down_vx_i8m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv64i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1down_vx_i8m8_m(mask, src, value, vl); +vint8m8_t test_vslide1down_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { + return __riscv_vslide1down_vx_i8m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m1_m(mask, src, value, vl); +vint16m1_t test_vslide1down_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { + return __riscv_vslide1down_vx_i16m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return 
__riscv_vslide1down_vx_i16m2_m(mask, src, value, vl); +vint16m2_t test_vslide1down_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { + return __riscv_vslide1down_vx_i16m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m4_m(mask, src, value, vl); +vint16m4_t test_vslide1down_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { + return __riscv_vslide1down_vx_i16m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1down_vx_i16m8_m(mask, src, value, vl); +vint16m8_t test_vslide1down_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { + return __riscv_vslide1down_vx_i16m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m1_m(mask, src, value, vl); +vint32m1_t test_vslide1down_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { + return __riscv_vslide1down_vx_i32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m2_m(mask, src, value, vl); +vint32m2_t test_vslide1down_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { + return __riscv_vslide1down_vx_i32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m4_m(mask, src, value, vl); +vint32m4_t test_vslide1down_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { + return __riscv_vslide1down_vx_i32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1down_vx_i32m8_m(mask, src, value, vl); +vint32m8_t test_vslide1down_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { + return __riscv_vslide1down_vx_i32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv1i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m1_m(mask, src, value, vl); +vint64m1_t test_vslide1down_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { + return __riscv_vslide1down_vx_i64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m2_m(mask, src, value, vl); +vint64m2_t test_vslide1down_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { + return __riscv_vslide1down_vx_i64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m4_m(mask, src, value, vl); +vint64m4_t test_vslide1down_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { + return __riscv_vslide1down_vx_i64m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1down_vx_i64m8_m(mask, src, value, vl); +vint64m8_t test_vslide1down_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { + return 
__riscv_vslide1down_vx_i64m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m1_m(mask, src, value, vl); +vuint8m1_t test_vslide1down_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { + return __riscv_vslide1down_vx_u8m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m2_m(mask, src, value, vl); +vuint8m2_t test_vslide1down_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { + return __riscv_vslide1down_vx_u8m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m4_m(mask, src, value, vl); +vuint8m4_t test_vslide1down_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { + return __riscv_vslide1down_vx_u8m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv64i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1down_vx_u8m8_m(mask, src, value, vl); +vuint8m8_t test_vslide1down_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { + return __riscv_vslide1down_vx_u8m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m1_m(mask, src, value, vl); +vuint16m1_t test_vslide1down_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { + return __riscv_vslide1down_vx_u16m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m2_m(mask, src, value, vl); +vuint16m2_t test_vslide1down_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { + return __riscv_vslide1down_vx_u16m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, 
size_t vl) { - return __riscv_vslide1down_vx_u16m4_m(mask, src, value, vl); +vuint16m4_t test_vslide1down_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { + return __riscv_vslide1down_vx_u16m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1down_vx_u16m8_m(mask, src, value, vl); +vuint16m8_t test_vslide1down_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { + return __riscv_vslide1down_vx_u16m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m1_m(mask, src, value, vl); +vuint32m1_t test_vslide1down_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { + return __riscv_vslide1down_vx_u32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m2_m(mask, src, value, vl); +vuint32m2_t test_vslide1down_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { + return __riscv_vslide1down_vx_u32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m4_m(mask, src, value, vl); +vuint32m4_t test_vslide1down_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { + return __riscv_vslide1down_vx_u32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1down_vx_u32m8_m(mask, src, value, vl); +vuint32m8_t test_vslide1down_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { + return __riscv_vslide1down_vx_u32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv1i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m1_m(mask, src, value, vl); +vuint64m1_t test_vslide1down_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { + return __riscv_vslide1down_vx_u64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m2_m(mask, src, value, vl); +vuint64m2_t test_vslide1down_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { + return __riscv_vslide1down_vx_u64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m4_m(mask, src, value, vl); +vuint64m4_t test_vslide1down_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { + return __riscv_vslide1down_vx_u64m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1down_vx_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1down.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1down_vx_u64m8_m(mask, src, value, vl); +vuint64m8_t test_vslide1down_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { + return __riscv_vslide1down_vx_u64m8_m(mask, maskedoff, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1up.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1up.c index f4f3d93daf556..ae2942a41900f 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1up.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslide1up.c @@ -327,322 +327,322 @@ vuint64m8_t test_vslide1up_vx_u64m8(vuint64m8_t src, uint64_t value, size_t vl) } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m1_m(mask, src, value, vl); +vint8m1_t test_vslide1up_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, int8_t value, size_t vl) { + return __riscv_vslide1up_vx_i8m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m2_m(mask, src, value, vl); +vint8m2_t test_vslide1up_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, int8_t value, size_t vl) { + return __riscv_vslide1up_vx_i8m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m4_m(mask, src, value, vl); +vint8m4_t test_vslide1up_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, int8_t value, size_t vl) { + return __riscv_vslide1up_vx_i8m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv64i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, vint8m8_t src, int8_t value, size_t vl) { - return __riscv_vslide1up_vx_i8m8_m(mask, src, value, vl); +vint8m8_t test_vslide1up_vx_i8m8_m(vbool1_t mask, 
vint8m8_t maskedoff, vint8m8_t src, int8_t value, size_t vl) { + return __riscv_vslide1up_vx_i8m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m1_m(mask, src, value, vl); +vint16m1_t test_vslide1up_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, int16_t value, size_t vl) { + return __riscv_vslide1up_vx_i16m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m2_m(mask, src, value, vl); +vint16m2_t test_vslide1up_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, int16_t value, size_t vl) { + return __riscv_vslide1up_vx_i16m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m4_m(mask, src, value, vl); +vint16m4_t test_vslide1up_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, int16_t value, size_t vl) { + return __riscv_vslide1up_vx_i16m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t src, int16_t value, size_t vl) { - return __riscv_vslide1up_vx_i16m8_m(mask, src, value, vl); +vint16m8_t test_vslide1up_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, int16_t value, size_t vl) { + return __riscv_vslide1up_vx_i16m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m1_m(mask, src, value, vl); +vint32m1_t test_vslide1up_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, int32_t value, size_t vl) { + return __riscv_vslide1up_vx_i32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m2_m(mask, src, value, vl); +vint32m2_t test_vslide1up_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, int32_t value, size_t vl) { + return __riscv_vslide1up_vx_i32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t src, int32_t 
value, size_t vl) { - return __riscv_vslide1up_vx_i32m4_m(mask, src, value, vl); +vint32m4_t test_vslide1up_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, int32_t value, size_t vl) { + return __riscv_vslide1up_vx_i32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t src, int32_t value, size_t vl) { - return __riscv_vslide1up_vx_i32m8_m(mask, src, value, vl); +vint32m8_t test_vslide1up_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, int32_t value, size_t vl) { + return __riscv_vslide1up_vx_i32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv1i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m1_m(mask, src, value, vl); +vint64m1_t test_vslide1up_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, int64_t value, size_t vl) { + return __riscv_vslide1up_vx_i64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m2_m(mask, src, value, vl); +vint64m2_t test_vslide1up_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, int64_t value, size_t vl) { + return __riscv_vslide1up_vx_i64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m4_m(mask, src, value, vl); +vint64m4_t test_vslide1up_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, int64_t value, size_t vl) { + return __riscv_vslide1up_vx_i64m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t src, int64_t value, size_t vl) { - return __riscv_vslide1up_vx_i64m8_m(mask, src, value, vl); +vint64m8_t test_vslide1up_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, int64_t value, size_t vl) { + return __riscv_vslide1up_vx_i64m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m1_m(mask, src, value, vl); +vuint8m1_t test_vslide1up_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, uint8_t value, size_t vl) { + return __riscv_vslide1up_vx_u8m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t 
test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m2_m(mask, src, value, vl); +vuint8m2_t test_vslide1up_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, uint8_t value, size_t vl) { + return __riscv_vslide1up_vx_u8m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m4_m(mask, src, value, vl); +vuint8m4_t test_vslide1up_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, uint8_t value, size_t vl) { + return __riscv_vslide1up_vx_u8m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i8 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv64i8.i8.i64( poison, [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[SRC]], i8 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, uint8_t value, size_t vl) { - return __riscv_vslide1up_vx_u8m8_m(mask, src, value, vl); +vuint8m8_t test_vslide1up_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, uint8_t value, size_t vl) { + return __riscv_vslide1up_vx_u8m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m1_m(mask, src, value, vl); +vuint16m1_t test_vslide1up_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, uint16_t value, size_t vl) { + return __riscv_vslide1up_vx_u16m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef 
zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m2_m(mask, src, value, vl); +vuint16m2_t test_vslide1up_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, uint16_t value, size_t vl) { + return __riscv_vslide1up_vx_u16m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m4_m(mask, src, value, vl); +vuint16m4_t test_vslide1up_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, uint16_t value, size_t vl) { + return __riscv_vslide1up_vx_u16m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i16 noundef zeroext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i16.i16.i64( poison, [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[SRC]], i16 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, uint16_t value, size_t vl) { - return __riscv_vslide1up_vx_u16m8_m(mask, src, value, vl); +vuint16m8_t test_vslide1up_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, uint16_t value, size_t vl) { + return __riscv_vslide1up_vx_u16m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vslide1up.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m1_m(mask, src, value, vl); +vuint32m1_t test_vslide1up_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, uint32_t value, size_t vl) { + return __riscv_vslide1up_vx_u32m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m2_m(mask, src, value, vl); +vuint32m2_t test_vslide1up_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, uint32_t value, size_t vl) { + return __riscv_vslide1up_vx_u32m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m4_m(mask, src, value, vl); +vuint32m4_t test_vslide1up_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, uint32_t value, size_t vl) { + return __riscv_vslide1up_vx_u32m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i32 noundef signext [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i32.i32.i64( poison, [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[SRC]], i32 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, uint32_t value, size_t vl) { - return __riscv_vslide1up_vx_u32m8_m(mask, src, value, vl); +vuint32m8_t test_vslide1up_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, uint32_t value, size_t vl) { 
+ return __riscv_vslide1up_vx_u32m8_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv1i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m1_m(mask, src, value, vl); +vuint64m1_t test_vslide1up_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, uint64_t value, size_t vl) { + return __riscv_vslide1up_vx_u64m1_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m2_m(mask, src, value, vl); +vuint64m2_t test_vslide1up_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, uint64_t value, size_t vl) { + return __riscv_vslide1up_vx_u64m2_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m4_m(mask, src, value, vl); +vuint64m4_t test_vslide1up_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, uint64_t value, size_t vl) { + return __riscv_vslide1up_vx_u64m4_m(mask, maskedoff, src, value, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslide1up_vx_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[VALUE:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vslide1up.mask.nxv8i64.i64.i64( poison, [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslide1up.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[VALUE]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, uint64_t value, size_t vl) { - return __riscv_vslide1up_vx_u64m8_m(mask, src, value, vl); +vuint64m8_t test_vslide1up_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, uint64_t value, size_t vl) { + return __riscv_vslide1up_vx_u64m8_m(mask, maskedoff, src, value, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslidedown.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslidedown.c index cde99de9bc8b8..817b8dcbf8113 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslidedown.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-permutation/wrappers/vslidedown.c @@ -447,442 +447,442 @@ vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4f16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4f16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m1_m(mask, src, offset, vl); +vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f16m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8f16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8f16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m2_m(mask, src, offset, vl); +vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f16m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16f16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16f16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m4_m(mask, src, offset, vl); +vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f16m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32f16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32f16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f16m8_m(mask, src, offset, vl); +vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f16m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2f32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2f32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m1_m(mask, src, offset, vl); +vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f32m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4f32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4f32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t 
src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m2_m(mask, src, offset, vl); +vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f32m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8f32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8f32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m4_m(mask, src, offset, vl); +vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f32m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16f32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16f32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f32m8_m(mask, src, offset, vl); +vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f32m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv1f64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv1f64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m1_m(mask, src, offset, vl); +vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f64m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2f64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2f64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m2_m(mask, src, offset, vl); +vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f64m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4f64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4f64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m4_m(mask, src, offset, vl); +vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f64m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8f64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8f64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_f64m8_m(mask, src, offset, vl); +vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_f64m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m1_m(mask, src, offset, vl); +vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i8m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m2_m(mask, src, offset, vl); +vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i8m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m4_m(mask, src, offset, vl); +vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i8m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv64i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i8m8_m(mask, src, offset, vl); +vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i8m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m1_m(mask, src, offset, vl); +vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i16m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m2_m(mask, src, offset, vl); +vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i16m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m4_m(mask, src, offset, vl); +vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i16m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF]], 
[[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i16m8_m(mask, src, offset, vl); +vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i16m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m1_m(mask, src, offset, vl); +vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i32m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m2_m(mask, src, offset, vl); +vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i32m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m4_m(mask, src, offset, vl); +vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i32m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vslidedown_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i32m8_m(mask, src, offset, vl); +vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i32m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv1i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m1_m(mask, src, offset, vl); +vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i64m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m2_m(mask, src, offset, vl); +vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i64m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.th.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m4_m(mask, src, offset, vl); +vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i64m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_i64m8_m(mask, src, offset, vl); +vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_i64m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m1_m(mask, src, offset, vl); +vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u8m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m2_m(mask, src, offset, vl); +vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u8m2_m(mask, maskedoff, src, offset, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m4_m(mask, src, offset, vl); +vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u8m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv64i8.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv64i8.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u8m8_m(mask, src, offset, vl); +vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u8m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m1_m(mask, src, offset, vl); +vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u16m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m2_m(mask, src, offset, vl); +vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u16m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m4_m(mask, src, offset, vl); +vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u16m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i16.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv32i16.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u16m8_m(mask, src, offset, vl); +vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u16m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m1_m(mask, src, offset, vl); +vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) 
{ + return __riscv_vslidedown_vx_u32m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m2_m(mask, src, offset, vl); +vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u32m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m4_m(mask, src, offset, vl); +vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u32m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i32.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv16i32.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u32m8_m(mask, src, offset, vl); +vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u32m8_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vslidedown.mask.nxv1i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv1i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m1_m(mask, src, offset, vl); +vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u64m1_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv2i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m2_m(mask, src, offset, vl); +vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u64m2_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv4i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m4_m(mask, src, offset, vl); +vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) { + return __riscv_vslidedown_vx_u64m4_m(mask, maskedoff, src, offset, vl); } // CHECK-RV64-LABEL: define dso_local @test_vslidedown_vx_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[SRC:%.*]], i64 noundef [[OFFSET:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i64.i64( poison, [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vslidedown.mask.nxv8i64.i64( [[MASKEDOFF]], [[SRC]], i64 [[OFFSET]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) { - return __riscv_vslidedown_vx_u64m8_m(mask, src, offset, vl); +vuint64m8_t 
test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
+ return __riscv_vslidedown_vx_u64m8_m(mask, maskedoff, src, offset, vl);
 }

From b2df497c7063a1f3eff1967881a62052eab3f648 Mon Sep 17 00:00:00 2001
From: imkiva
Date: Mon, 3 Jun 2024 17:42:01 +0800
Subject: [PATCH 06/12] [Clang][XTHeadVector] fix `HasMaskedOffOperand` of `th_vslidedown`

---
 clang/include/clang/Basic/riscv_vector_xtheadv.td | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv.td b/clang/include/clang/Basic/riscv_vector_xtheadv.td
index 2a2ce4e8c4d5b..3f243cecb0209 100644
--- a/clang/include/clang/Basic/riscv_vector_xtheadv.td
+++ b/clang/include/clang/Basic/riscv_vector_xtheadv.td
@@ -2068,7 +2068,7 @@ let UnMaskedPolicyScheme = NonePolicy,
 let UnMaskedPolicyScheme = NonePolicy,
     MaskedPolicyScheme = HasPassthruOperand,
-    HasMaskedOffOperand = false,
+    HasMaskedOffOperand = true,
     ManualCodegen = [{
       if (IsMasked) {
        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);

From 9d6a1d076a4014c31b2fbc7b4d3e8e5d83df8d8e Mon Sep 17 00:00:00 2001
From: imkiva
Date: Mon, 3 Jun 2024 17:57:41 +0800
Subject: [PATCH 07/12] [Clang][XTHeadVector] fix wrapper tests for vector-single-width-averaging (TAMU)

---
 .../wrappers/vaadd.c | 256 +++++++++---------
 .../wrappers/vasub.c | 256 +++++++++---------
 2 files changed, 256 insertions(+), 256 deletions(-)

diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vaadd.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vaadd.c
index 2fd77eb8a2be5..5cc8eb6befd74 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vaadd.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vaadd.c
@@ -328,322 +328,322 @@ vint64m8_t test_vaadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 }

 // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vaadd_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint8m1_t test_vaadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return __riscv_vaadd_vv_i8m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
 }

 // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call
@llvm.riscv.th.vaadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vaadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vaadd_vx_i8m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vaadd_vv_i8m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vaadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vaadd_vx_i8m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vaadd_vv_i8m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vaadd_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vaadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vaadd_vx_i8m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vaadd_vv_i8m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vaadd_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vaadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vaadd_vx_i8m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vaadd_vv_i16m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vaadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vaadd_vx_i16m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vaadd_vv_i16m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vaadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vaadd_vx_i16m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vaadd_vv_i16m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vaadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vaadd_vx_i16m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vaadd_vv_i16m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vaadd_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vaadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vaadd_vx_i16m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vaadd_vv_i32m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vaadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vaadd_vx_i32m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vaadd_vv_i32m2_m(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vaadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vaadd_vx_i32m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vaadd_vv_i32m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vaadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vaadd_vx_i32m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], 
[[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vaadd_vv_i32m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vaadd_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vaadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vaadd_vx_i32m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vaadd_vv_i64m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vaadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t 
op2, size_t vl) { + return __riscv_vaadd_vx_i64m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vaadd_vv_i64m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vaadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vaadd_vx_i64m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vaadd_vv_i64m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vaadd.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vaadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vaadd_vx_i64m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vaadd_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vaadd_vv_i64m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vaadd_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vaadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vaadd_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vaadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vaadd_vx_i64m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vasub.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vasub.c index 278e0fb3e4179..8c2839c0f0add 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vasub.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-averaging/wrappers/vasub.c @@ -328,322 +328,322 @@ vint64m8_t test_vasub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vasub_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vasub_vv_i8m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m1_t test_vasub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vasub_vx_i8m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vasub_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m2_t test_vasub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vasub_vv_i8m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); 
+vint8m2_t test_vasub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vasub_vx_i8m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vasub_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vasub_vv_i8m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m4_t test_vasub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vasub_vx_i8m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vasub_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vasub_vv_i8m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vasub_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint8m8_t test_vasub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vasub_vx_i8m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vasub_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vasub_vv_i16m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m1_t test_vasub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vasub_vx_i16m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vasub_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t 
test_vasub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vasub_vv_i16m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m2_t test_vasub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vasub_vx_i16m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vasub_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vasub_vv_i16m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m4_t test_vasub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vasub_vx_i16m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vasub_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vasub_vv_i16m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vasub_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint16m8_t test_vasub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vasub_vx_i16m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vasub_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vasub_vv_i32m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t 
vl) { - return __riscv_vasub_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m1_t test_vasub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vasub_vx_i32m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vasub_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vasub_vv_i32m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m2_t test_vasub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vasub_vx_i32m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vasub_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vasub_vv_i32m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m4_t test_vasub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vasub_vx_i32m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vasub_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vasub_vv_i32m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vasub_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint32m8_t test_vasub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vasub_vx_i32m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vv_i64m1_m(vbool64_t 
mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vasub_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vasub_vv_i64m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m1_t test_vasub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vasub_vx_i64m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vasub_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vasub_vv_i64m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_vx_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m2_t test_vasub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vasub_vx_i64m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vasub_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vasub_vv_i64m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m4_t test_vasub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vasub_vx_i64m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vasub_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vasub_vv_i64m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vasub_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vasub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t 
test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vasub_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vasub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vasub_vx_i64m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } From 67c04353328a54e79ef316de3d76d4ab0afd7bac Mon Sep 17 00:00:00 2001 From: imkiva Date: Mon, 3 Jun 2024 17:58:18 +0800 Subject: [PATCH 08/12] [Clang][XTHeadVector] fix wrapper tests for `vsmul` (TAMU) --- .../wrappers/vsmul.c | 633 +++++++++--------- 1 file changed, 309 insertions(+), 324 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c index 831b421bd9f67..eb3ca079c8aca 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-fractional-multiply-with-rounding-and-saturation/wrappers/vsmul.c @@ -7,7 +7,6 @@ #include - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: @@ -28,27 +27,6 @@ vint8m1_t test_vsmul_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) { return __riscv_vsmul_vx_i8m1(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -69,27 +47,6 @@ vint8m2_t test_vsmul_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) { return __riscv_vsmul_vx_i8m2(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return 
__riscv_vsmul_vv_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -110,27 +67,6 @@ vint8m4_t test_vsmul_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) { return __riscv_vsmul_vx_i8m4(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -151,27 +87,6 @@ vint8m8_t test_vsmul_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) { return __riscv_vsmul_vx_i8m8(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsmul_vx_i8m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -192,27 +107,6 @@ vint16m1_t test_vsmul_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) { return __riscv_vsmul_vx_i16m1(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -233,27 +127,6 @@ vint16m2_t test_vsmul_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) { return __riscv_vsmul_vx_i16m2(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -274,27 +147,6 @@ vint16m4_t test_vsmul_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) { return __riscv_vsmul_vx_i16m4(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, 
size_t vl) { - return __riscv_vsmul_vv_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -315,27 +167,6 @@ vint16m8_t test_vsmul_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) { return __riscv_vsmul_vx_i16m8(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsmul_vx_i16m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -356,27 +187,6 @@ vint32m1_t test_vsmul_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) { return __riscv_vsmul_vx_i32m1(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: 
define dso_local @test_vsmul_vv_i32m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -397,27 +207,6 @@ vint32m2_t test_vsmul_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) { return __riscv_vsmul_vx_i32m2(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -438,27 +227,6 @@ vint32m4_t test_vsmul_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) { return __riscv_vsmul_vx_i32m4(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -479,27 +247,6 @@ vint32m8_t test_vsmul_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) { return __riscv_vsmul_vx_i32m8(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// 
-vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vsmul_vx_i32m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -520,27 +267,6 @@ vint64m1_t test_vsmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) { return __riscv_vsmul_vx_i64m1(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m1_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -561,27 +287,6 @@ vint64m2_t test_vsmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) { return __riscv_vsmul_vx_i64m2(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m2_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m2_m(mask, 
op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -602,27 +307,6 @@ vint64m4_t test_vsmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) { return __riscv_vsmul_vx_i64m4(op1, op2, __RISCV_VXRM_RNU, vl); } -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsmul_vx_i64m4_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); -} - - // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8 // CHECK-RV64-SAME: ( [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: @@ -643,23 +327,324 @@ vint64m8_t test_vsmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { return __riscv_vsmul_vx_i64m8(op1, op2, __RISCV_VXRM_RNU, vl); } + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m1_t test_vsmul_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m2_m(mask, 
maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m2_t test_vsmul_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m4_t test_vsmul_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i8m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i8m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i8m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint8m8_t test_vsmul_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vsmul_vx_i8m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, 
vint16m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m1_t test_vsmul_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m2_t test_vsmul_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m4_t test_vsmul_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// 
CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i16m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i16m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint16m8_t test_vsmul_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vsmul_vx_i16m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m1_t test_vsmul_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m2_t test_vsmul_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vsmul.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m4_t test_vsmul_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i32m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i32m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i32m8_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint32m8_t test_vsmul_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vsmul_vx_i32m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m1_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m1_t test_vsmul_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m1_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m2_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m2_t test_vsmul_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m2_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + +// CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m4_m +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-NEXT: entry: +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: ret [[TMP0]] +// +vint64m4_t test_vsmul_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vsmul_vx_i64m4_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); +} + // CHECK-RV64-LABEL: define dso_local @test_vsmul_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 0, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsmul_vv_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl); +vint64m8_t test_vsmul_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vsmul_vv_i64m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsmul_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsmul.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 0, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
- return __riscv_vsmul_vx_i64m8_m(mask, op1, op2, __RISCV_VXRM_RNU, vl);
+vint64m8_t test_vsmul_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
+ return __riscv_vsmul_vx_i64m8_m(mask, maskedoff, op1, op2, __RISCV_VXRM_RNU, vl);
 }

From a77880655ecda3e164892138d631f97dca83e249 Mon Sep 17 00:00:00 2001
From: imkiva
Date: Mon, 3 Jun 2024 17:58:33 +0800
Subject: [PATCH 09/12] [Clang][XTHeadVector] fix wrapper tests for vector-single-width-with-saturation-add (TAMU)

---
 .../wrappers/vsadd.c | 256 ++++++++---------
 .../wrappers/vsaddu.c | 256 ++++++++---------
 .../wrappers/vssub.c | 257 +++++++++---------
 .../wrappers/vssubu.c | 256 ++++++++---------
 4 files changed, 512 insertions(+), 513 deletions(-)

diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsadd.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsadd.c
index e81fcef3a2ba9..bd704b0c379fb 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsadd.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsadd.c
@@ -327,322 +327,322 @@ vint64m8_t test_vsadd_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
 }

 // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
- return __riscv_vsadd_vv_i8m1_m(mask, op1, op2, vl);
+vint8m1_t test_vsadd_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+ return __riscv_vsadd_vv_i8m1_m(mask, maskedoff, op1, op2, vl);
 }

 // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
- return
__riscv_vsadd_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vsadd_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vsadd_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vsadd_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vsadd_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vsadd_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vsadd_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vsadd_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vsadd_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vsadd_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vsadd_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vsadd_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vsadd_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vsadd_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vsadd_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vsadd_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vsadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vsadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vsadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vsadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vsadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vsadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vsadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vsadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t 
test_vsadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vsadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vsadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vsadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vsadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vsadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vsadd_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vsadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vsadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vsadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vsadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vsadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vsadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vsadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vsadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vsadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vsadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m4_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vsadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vsadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vsadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vsadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vsadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vsadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, 
int32_t op2, size_t vl) { - return __riscv_vsadd_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vsadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vsadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vsadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vsadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vsadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vsadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vsadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vsadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vsadd.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vsadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vsadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vsadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vsadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vsadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vsadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vsadd_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vsadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vsadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsadd_vx_i64m8_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsadd.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vsadd_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vsadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vsadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsaddu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsaddu.c index b8695689a4251..e6eba648b4603 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsaddu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vsaddu.c @@ -327,322 +327,322 @@ vuint64m8_t test_vsaddu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vsaddu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vsaddu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vsaddu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vsaddu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vsaddu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vsaddu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vsaddu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vsaddu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vsaddu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vsaddu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - 
return __riscv_vsaddu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vsaddu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vsaddu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vsaddu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vsaddu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vsaddu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vsaddu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vsaddu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vsaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vsaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vsaddu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vsaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vsaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vsaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vsaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vsaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vsaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vsaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vsaddu_vv_u16m4_m(mask, maskedoff, op1, op2, 
vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vsaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vsaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vsaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vsaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vsaddu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vsaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vsaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vsaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vsaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vsaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vsaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vsaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vsaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vsaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vsaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vsaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vsaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vsaddu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vsaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vsaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vsaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vsaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - 
return __riscv_vsaddu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vsaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return __riscv_vsaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vsaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vsaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vsaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vsaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vsaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vsaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vsaddu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vsaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vsaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vsaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vsaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vsaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vsaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vsaddu_vv_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vsaddu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vsaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vsaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vsaddu_vx_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vsaddu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vsaddu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vsaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vsaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssub.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssub.c index b6688705485ff..47332e2e570f0 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssub.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssub.c @@ -327,322 +327,321 @@ vint64m8_t test_vssub_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) { - return __riscv_vssub_vv_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vssub_vv_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) { + return __riscv_vssub_vv_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m1_m(mask, op1, op2, vl); +vint8m1_t test_vssub_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) { + return __riscv_vssub_vx_i8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m2_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) { - return __riscv_vssub_vv_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vssub_vv_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) { + return __riscv_vssub_vv_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m2_m(mask, op1, op2, vl); +vint8m2_t test_vssub_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) { + return __riscv_vssub_vx_i8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) { - return __riscv_vssub_vv_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vssub_vv_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) { + return __riscv_vssub_vv_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) { - return 
__riscv_vssub_vx_i8m4_m(mask, op1, op2, vl); +vint8m4_t test_vssub_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) { + return __riscv_vssub_vx_i8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) { - return __riscv_vssub_vv_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vssub_vv_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) { + return __riscv_vssub_vv_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) { - return __riscv_vssub_vx_i8m8_m(mask, op1, op2, vl); +vint8m8_t test_vssub_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) { + return __riscv_vssub_vx_i8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) { - return __riscv_vssub_vv_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vssub_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) { + return __riscv_vssub_vv_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m1_m(mask, op1, op2, vl); +vint16m1_t test_vssub_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) { + return __riscv_vssub_vx_i16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) { - return __riscv_vssub_vv_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vssub_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) { + return __riscv_vssub_vv_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m2_m(mask, op1, op2, vl); +vint16m2_t test_vssub_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) { + return __riscv_vssub_vx_i16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) { - return __riscv_vssub_vv_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vssub_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) { + return __riscv_vssub_vv_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 
noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m4_m(mask, op1, op2, vl); +vint16m4_t test_vssub_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) { + return __riscv_vssub_vx_i16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) { - return __riscv_vssub_vv_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vssub_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) { + return __riscv_vssub_vv_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) { - return __riscv_vssub_vx_i16m8_m(mask, op1, op2, vl); +vint16m8_t test_vssub_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) { + return __riscv_vssub_vx_i16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) { - return 
__riscv_vssub_vv_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vssub_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) { + return __riscv_vssub_vv_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m1_m(mask, op1, op2, vl); +vint32m1_t test_vssub_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) { + return __riscv_vssub_vx_i32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) { - return __riscv_vssub_vv_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vssub_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) { + return __riscv_vssub_vv_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m2_m(mask, op1, op2, vl); +vint32m2_t test_vssub_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) { + return __riscv_vssub_vx_i32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vssub.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) { - return __riscv_vssub_vv_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vssub_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) { + return __riscv_vssub_vv_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m4_m(mask, op1, op2, vl); +vint32m4_t test_vssub_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) { + return __riscv_vssub_vx_i32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) { - return __riscv_vssub_vv_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vssub_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) { + return __riscv_vssub_vv_i32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) { - return __riscv_vssub_vx_i32m8_m(mask, op1, op2, vl); +vint32m8_t test_vssub_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) { + return __riscv_vssub_vx_i32m8_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) { - return __riscv_vssub_vv_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vssub_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) { + return __riscv_vssub_vv_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m1_m(mask, op1, op2, vl); +vint64m1_t test_vssub_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) { + return __riscv_vssub_vx_i64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) { - return __riscv_vssub_vv_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vssub_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) { + return __riscv_vssub_vv_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv2i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m2_t 
test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m2_m(mask, op1, op2, vl); +vint64m2_t test_vssub_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) { + return __riscv_vssub_vx_i64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) { - return __riscv_vssub_vv_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vssub_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) { + return __riscv_vssub_vv_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m4_m(mask, op1, op2, vl); +vint64m4_t test_vssub_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) { + return __riscv_vssub_vx_i64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vv_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) { - return __riscv_vssub_vv_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vssub_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) { + return __riscv_vssub_vv_i64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssub_vx_i64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssub.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) { - return __riscv_vssub_vx_i64m8_m(mask, op1, op2, vl); +vint64m8_t test_vssub_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) { + return __riscv_vssub_vx_i64m8_m(mask, maskedoff, op1, op2, vl); } - diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssubu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssubu.c index 8d5317535c6f5..0965cd909b55b 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssubu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-single-width-saturating-add/wrappers/vssubu.c @@ -327,322 +327,322 @@ vuint64m8_t test_vssubu_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i8.nxv8i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vssubu_vv_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) { + return __riscv_vssubu_vv_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m1_m(mask, op1, op2, vl); +vuint8m1_t test_vssubu_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) { + return __riscv_vssubu_vx_u8m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vssubu.mask.nxv16i8.nxv16i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i8.nxv16i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vssubu_vv_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) { + return __riscv_vssubu_vv_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m2_m(mask, op1, op2, vl); +vuint8m2_t test_vssubu_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) { + return __riscv_vssubu_vx_u8m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i8.nxv32i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i8.nxv32i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vssubu_vv_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) { + return __riscv_vssubu_vv_u8m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m4_m(mask, op1, op2, vl); +vuint8m4_t test_vssubu_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) { + return __riscv_vssubu_vx_u8m4_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv64i8.nxv64i8.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv64i8.nxv64i8.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vssubu_vv_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) { + return __riscv_vssubu_vv_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u8m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i8 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv64i8.i8.i64( poison, [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv64i8.i8.i64( [[MASKEDOFF]], [[OP1]], i8 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) { - return __riscv_vssubu_vx_u8m8_m(mask, op1, op2, vl); +vuint8m8_t test_vssubu_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) { + return __riscv_vssubu_vx_u8m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i16.nxv4i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vssubu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) { + return __riscv_vssubu_vv_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m1_m(mask, op1, op2, vl); +vuint16m1_t test_vssubu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) { + return __riscv_vssubu_vx_u16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i16.nxv8i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i16.nxv8i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vssubu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) { + return __riscv_vssubu_vv_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m2_m(mask, op1, op2, vl); +vuint16m2_t test_vssubu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) { + return __riscv_vssubu_vx_u16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i16.nxv16i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i16.nxv16i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vssubu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) { + return __riscv_vssubu_vv_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m4_m(mask, op1, op2, vl); +vuint16m4_t test_vssubu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) { + return __riscv_vssubu_vx_u16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i16.nxv32i16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i16.nxv32i16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vssubu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) { + return __riscv_vssubu_vv_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i16 noundef zeroext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i16.i16.i64( poison, [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv32i16.i16.i64( [[MASKEDOFF]], [[OP1]], i16 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) { - return __riscv_vssubu_vx_u16m8_m(mask, op1, op2, vl); +vuint16m8_t test_vssubu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) { + return __riscv_vssubu_vx_u16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i32.nxv2i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m1_m(mask, op1, op2, vl); 
+vuint32m1_t test_vssubu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) { + return __riscv_vssubu_vv_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m1_m(mask, op1, op2, vl); +vuint32m1_t test_vssubu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) { + return __riscv_vssubu_vx_u32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i32.nxv4i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i32.nxv4i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vssubu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) { + return __riscv_vssubu_vv_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m2_m(mask, op1, op2, vl); +vuint32m2_t test_vssubu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) { + return __riscv_vssubu_vx_u32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vssubu.mask.nxv8i32.nxv8i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i32.nxv8i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vssubu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) { + return __riscv_vssubu_vv_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m4_m(mask, op1, op2, vl); +vuint32m4_t test_vssubu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) { + return __riscv_vssubu_vx_u32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i32.nxv16i32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i32.nxv16i32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vssubu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) { + return __riscv_vssubu_vv_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i32 noundef signext [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i32.i32.i64( poison, [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv16i32.i32.i64( [[MASKEDOFF]], [[OP1]], i32 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) { - return __riscv_vssubu_vx_u32m8_m(mask, op1, op2, vl); +vuint32m8_t test_vssubu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) { + return 
__riscv_vssubu_vx_u32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv1i64.nxv1i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vssubu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) { + return __riscv_vssubu_vv_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv1i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv1i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m1_m(mask, op1, op2, vl); +vuint64m1_t test_vssubu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) { + return __riscv_vssubu_vx_u64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i64.nxv2i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i64.nxv2i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vssubu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) { + return __riscv_vssubu_vv_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv2i64.i64.i64( 
[[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m2_m(mask, op1, op2, vl); +vuint64m2_t test_vssubu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) { + return __riscv_vssubu_vx_u64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i64.nxv4i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i64.nxv4i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vssubu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) { + return __riscv_vssubu_vv_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv4i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m4_m(mask, op1, op2, vl); +vuint64m4_t test_vssubu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) { + return __riscv_vssubu_vx_u64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vv_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i64.nxv8i64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i64.nxv8i64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { - return __riscv_vssubu_vv_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vssubu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) { + return __riscv_vssubu_vv_u64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vssubu_vx_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i64.i64.i64( poison, [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vssubu.mask.nxv8i64.i64.i64( [[MASKEDOFF]], [[OP1]], i64 [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) { - return __riscv_vssubu_vx_u64m8_m(mask, op1, op2, vl); +vuint64m8_t test_vssubu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) { + return __riscv_vssubu_vx_u64m8_m(mask, maskedoff, op1, op2, vl); } From a3ed7a578ec7908faf64fe73cc282c17bd4b266e Mon Sep 17 00:00:00 2001 From: imkiva Date: Tue, 4 Jun 2024 16:03:51 +0800 Subject: [PATCH 10/12] [Clang][XTHeadVector] make wrappers for vector-reduction defaults to TUM policy --- .../Basic/riscv_vector_xtheadv_wrappers.td | 1180 ++++++++--------- 1 file changed, 584 insertions(+), 596 deletions(-) diff --git a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td index cb5dfdad7d76f..1d0379b534210 100644 --- a/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td +++ b/clang/include/clang/Basic/riscv_vector_xtheadv_wrappers.td @@ -4880,602 +4880,590 @@ def th_vector_floating_point_operations_wrapper_macros: RVVHeader; let HeaderCode = [{ // Vector Reduction Operations -#define __riscv_vfredmax_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1_mu(mask, 
dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmax_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vfredmin_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredmin_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define 
__riscv_vfredosum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredosum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m8_f16m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m1_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m2_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m4_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m8_f32m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m1_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredsum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1_mu(mask, dest, vector, 
scalar, vl) -#define __riscv_vfredusum_vs_f16m1_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m2_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m4_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m8_f16m1(vector, scalar, vl) __riscv_vfredsum_vs_f16m8_f16m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m1_f32m1(vector, scalar, vl) __riscv_vfredsum_vs_f32m1_f32m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m2_f32m1(vector, scalar, vl) __riscv_vfredsum_vs_f32m2_f32m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m4_f32m1(vector, scalar, vl) __riscv_vfredsum_vs_f32m4_f32m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m8_f32m1(vector, scalar, vl) __riscv_vfredsum_vs_f32m8_f32m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m1_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m1_f64m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m2_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m4_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m8_f64m1(vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m4_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m8_f16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfredusum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl) 
__riscv_th_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1_mu(mask, dest, vector, scalar, vl) 
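The pattern of this hunk: the removed reduction wrappers above expand their masked `_m` forms to the `_mu` intrinsics and their unmasked forms to intrinsics with no policy suffix, while the replacement `+` defines later in the hunk give each reduction wrapper an explicit `dest` operand and expand to the tail-undisturbed `_tu` (unmasked) and `_tum` (masked) intrinsics. A minimal usage sketch of the new masked mapping, assuming an XTHeadVector-enabled clang where <riscv_vector.h> declares the `__riscv_th_*` intrinsics; the function and variable names are illustrative only:

#include <riscv_vector.h>  /* assumed: XTHeadVector toolchain exposing the __riscv_th_* intrinsics */

/* Illustrative sketch: reduce the mask-enabled elements of `src` together with
 * element 0 of `init`; the other elements of the result come from `tail`. */
static vfloat32m1_t max_reduce(vbool32_t mask, vfloat32m1_t tail,
                               vfloat32m1_t src, vfloat32m1_t init, size_t vl) {
    /* After this patch the wrapper below expands to
     *   __riscv_th_vfredmax_vs_f32m1_f32m1_tum(mask, tail, src, init, vl);
     * the removed wrappers in this hunk forwarded the `_m` spellings to the
     * `_mu` intrinsics instead. */
    return __riscv_vfredmax_vs_f32m1_f32m1_m(mask, tail, src, init, vl);
}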
-#define __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1(vector, scalar, vl) -#define __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1(vector, scalar, vl) -#define __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1(vector, scalar, vl) -#define __riscv_vredand_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1(vector, scalar, vl) -#define __riscv_vredand_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1(vector, scalar, vl) -#define __riscv_vredand_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1(vector, scalar, vl) -#define __riscv_vredand_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1(vector, scalar, vl) -#define __riscv_vredand_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1(vector, scalar, vl) -#define __riscv_vredand_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1(vector, scalar, vl) -#define __riscv_vredand_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1(vector, scalar, vl) -#define __riscv_vredand_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1(vector, scalar, vl) -#define __riscv_vredand_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1(vector, scalar, vl) -#define __riscv_vredand_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1(vector, scalar, vl) -#define __riscv_vredand_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1(vector, scalar, vl) -#define __riscv_vredand_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1(vector, scalar, vl) -#define __riscv_vredand_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredand_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1(vector, scalar, vl) -#define __riscv_vredand_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1(vector, scalar, vl) -#define __riscv_vredand_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1(vector, scalar, vl) -#define __riscv_vredand_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1(vector, scalar, vl) -#define __riscv_vredand_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1(vector, scalar, vl) -#define __riscv_vredand_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1(vector, scalar, vl) -#define __riscv_vredand_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1(vector, scalar, vl) -#define __riscv_vredand_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1(vector, scalar, vl) -#define __riscv_vredand_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1(vector, scalar, vl) -#define __riscv_vredand_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1(vector, scalar, vl) -#define __riscv_vredand_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1(vector, scalar, vl) -#define __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1(vector, scalar, vl) -#define __riscv_vredand_vs_u64m1_u64m1(vector, scalar, vl) 
__riscv_th_vredand_vs_u64m1_u64m1(vector, scalar, vl) -#define __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1(vector, scalar, vl) -#define __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1(vector, scalar, vl) -#define __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredand_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define 
__riscv_vredand_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredand_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredmax_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m4_i8m1_m(mask, 
dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmax_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1(vector, scalar, 
vl) -#define __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1(vector, scalar, vl) 
-#define __riscv_vredmin_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredmin_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredmin_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1(vector, scalar, vl) -#define 
__riscv_vredminu_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredminu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredminu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define 
__riscv_vredminu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1(vector, scalar, vl) -#define __riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1(vector, scalar, vl) -#define __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1(vector, scalar, vl) -#define __riscv_vredor_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1(vector, scalar, vl) -#define __riscv_vredor_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1(vector, scalar, vl) -#define __riscv_vredor_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1(vector, scalar, vl) -#define __riscv_vredor_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1(vector, scalar, vl) -#define __riscv_vredor_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1(vector, scalar, vl) -#define __riscv_vredor_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1(vector, scalar, vl) -#define __riscv_vredor_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1(vector, scalar, vl) -#define __riscv_vredor_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1(vector, scalar, vl) -#define __riscv_vredor_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1(vector, scalar, vl) -#define __riscv_vredor_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1(vector, scalar, vl) -#define __riscv_vredor_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1(vector, scalar, vl) -#define __riscv_vredor_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1(vector, scalar, vl) -#define __riscv_vredor_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredor_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1(vector, scalar, vl) -#define __riscv_vredor_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1(vector, scalar, vl) -#define __riscv_vredor_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1(vector, scalar, vl) -#define __riscv_vredor_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1(vector, scalar, vl) -#define __riscv_vredor_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1(vector, scalar, vl) -#define __riscv_vredor_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1(vector, scalar, vl) -#define __riscv_vredor_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1(vector, scalar, vl) -#define __riscv_vredor_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1(vector, scalar, vl) -#define __riscv_vredor_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1(vector, scalar, vl) -#define __riscv_vredor_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1(vector, scalar, vl) -#define __riscv_vredor_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1(vector, scalar, vl) -#define __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1(vector, scalar, vl) -#define __riscv_vredor_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1(vector, scalar, vl) -#define __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1(vector, scalar, vl) -#define __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl) 
__riscv_th_vredor_vs_u64m4_u64m1(vector, scalar, vl) -#define __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define 
__riscv_vredor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u16m2_u16m1(vector, scalar, vl) 
__riscv_th_vredsum_vs_u16m2_u16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredsum_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredsum_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredsum_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i8m8_i8m1(vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i16m1_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i16m2_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i16m4_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i16m8_i16m1(vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i32m1_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i32m2_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i32m4_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i32m8_i32m1(vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1(vector, scalar, vl) 
-#define __riscv_vredxor_vs_i64m1_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m1_i64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i64m2_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i64m4_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i64m8_i64m1(vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u8m1_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u8m2_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u8m4_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u8m8_u8m1(vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u16m1_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u16m2_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u16m4_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u16m8_u16m1(vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u32m1_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u32m2_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u32m4_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u64m1_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1(vector, scalar, vl) -#define __riscv_vredxor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define 
__riscv_vredxor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vredxor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl) 
__riscv_th_vwredsum_vs_i8m4_i16m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m8_i16m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m1_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m2_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m4_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m8_i32m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m1_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1(vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i8m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i16m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsum_vs_i32m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl) -#define 
__riscv_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8mf2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16mf2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32mf2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vwredsumu_vs_u32m1_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1_mu(mask, dest, vector, scalar, vl) -#define __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1_mu(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m1_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m2_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m4_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m8_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m1_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m2_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m4_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m8_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m1_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m2_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m4_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m8_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m1_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m2_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m4_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f16m8_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f32m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define 
__riscv_vfredmax_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmax_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmax_vs_f64m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m1_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m2_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m4_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m8_f16m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m1_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m2_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m4_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m8_f32m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m1_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m2_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m4_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m8_f64m1(dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m1_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m2_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m4_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f16m8_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f32m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredmin_vs_f64m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredmin_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vfredmin_vs_f64m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m1_f32m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m2_f32m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m4_f32m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m8_f32m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m1_f64m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m2_f64m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m4_f64m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m8_f64m1(dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m1_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m2_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m4_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f16m8_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f32m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredosum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredosum_vs_f64m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m1_f16m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1_tu(dest, 
vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m2_f16m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m4_f16m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m8_f16m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m1_f32m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m2_f32m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m4_f32m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m8_f32m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m1_f64m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m2_f64m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m4_f64m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m8_f64m1(dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m1_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m2_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m4_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f16m8_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f32m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredsum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfredsum_vs_f64m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m2_f16m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m4_f16m1(dest, vector, scalar, vl) 
__riscv_vfredsum_vs_f16m4_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m8_f16m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m8_f16m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m1_f32m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m2_f32m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m4_f32m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m8_f32m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m1_f64m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m2_f64m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m4_f64m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m8_f64m1(dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m1_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m2_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m4_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f16m8_f16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f32m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfredusum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_vfredsum_vs_f64m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m1_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m2_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m4_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m8_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m1_f64m1(dest, vector, scalar, 
vl) __riscv_th_vfwredosum_vs_f32m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m2_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m4_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m8_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f16m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredosum_vs_f32m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m1_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m2_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m4_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m8_f32m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m1_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m2_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m2_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m4_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m8_f64m1(dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1_tu(dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m1_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m2_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m4_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f16m8_f32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m1_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vfwredsum_vs_f32m2_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m4_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vfwredsum_vs_f32m8_f64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m1_i8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m2_i8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m4_i8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m8_i8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m1_i16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m2_i16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m4_i16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m8_i16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m1_i32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m2_i32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m4_i32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m8_i32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m1_i64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m2_i64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m4_i64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m8_i64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m1_u8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m2_u8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m4_u8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m4_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m8_u8m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m1_u16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m2_u16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m4_u16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m8_u16m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m1_u32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1_tu(dest, vector, 
scalar, vl) +#define __riscv_vredand_vs_u32m2_u32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m4_u32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m8_u32m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m1_u64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m2_u64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m4_u64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m8_u64m1(dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m1_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m2_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m4_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i8m8_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i16m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i32m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_i64m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m1_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m2_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredand_vs_u8m4_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u8m8_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u16m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u32m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredand_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredand_vs_u64m8_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m1_i8m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m2_i8m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m4_i8m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m8_i8m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m1_i16m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m2_i16m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m4_i16m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m8_i16m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m1_i32m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m2_i32m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m4_i32m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m8_i32m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m1_i64m1(dest, vector, scalar, vl) 
__riscv_th_vredmax_vs_i64m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m2_i64m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m4_i64m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m8_i64m1(dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m1_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m2_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m4_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i8m8_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i16m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i32m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmax_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmax_vs_i64m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m1_u8m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m2_u8m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m4_u8m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m8_u8m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m1_u16m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m2_u16m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m4_u16m1(dest, 
vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m8_u16m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m1_u32m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m2_u32m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m4_u32m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m8_u32m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m1_u64m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m2_u64m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m4_u64m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m8_u64m1(dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m1_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m2_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m4_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u8m8_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u16m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u32m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmaxu_vs_u64m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredmaxu_vs_u64m8_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m1_i8m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m2_i8m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m4_i8m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m8_i8m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m1_i16m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m2_i16m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m4_i16m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m8_i16m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m1_i32m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m2_i32m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m4_i32m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m8_i32m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m1_i64m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m2_i64m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m4_i64m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m8_i64m1(dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m1_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m2_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m4_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i8m8_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i16m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define 
__riscv_vredmin_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i32m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredmin_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredmin_vs_i64m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m1_u8m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m2_u8m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m4_u8m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m8_u8m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m1_u16m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m2_u16m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m4_u16m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m8_u16m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m1_u32m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m2_u32m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m4_u32m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m8_u32m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m1_u64m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m2_u64m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m4_u64m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m8_u64m1(dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m1_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m2_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m4_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u8m8_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m1_u16m1_m(mask, 
dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u16m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u32m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredminu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredminu_vs_u64m8_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m1_i8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m2_i8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m4_i8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m8_i8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m1_i16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m2_i16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m4_i16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m8_i16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m1_i32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m2_i32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m4_i32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m8_i32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m1_i64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m2_i64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m4_i64m1(dest, vector, scalar, vl) 
__riscv_th_vredor_vs_i64m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m8_i64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m1_u8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m2_u8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m4_u8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m8_u8m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m1_u16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m2_u16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m4_u16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m8_u16m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m1_u32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m2_u32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m4_u32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m8_u32m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m1_u64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m2_u64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m4_u64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m8_u64m1(dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m1_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m2_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m4_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i8m8_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i16m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredor_vs_i32m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i32m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_i64m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m1_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m2_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m4_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u8m8_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u16m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u32m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredor_vs_u64m8_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m1_i8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m2_i8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m4_i8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1_tu(dest, vector, scalar, vl) 
+#define __riscv_vredsum_vs_i8m8_i8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m1_i16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m2_i16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m4_i16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m8_i16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m1_i32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m2_i32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m4_i32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m8_i32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m1_i64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m2_i64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m4_i64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m8_i64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m1_u8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m2_u8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m4_u8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m8_u8m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m1_u16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m2_u16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m4_u16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m8_u16m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m1_u32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m2_u32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m4_u32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m8_u32m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m1_u64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m2_u64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m4_u64m1(dest, vector, scalar, vl) 
__riscv_th_vredsum_vs_u64m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m8_u64m1(dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m1_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m2_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m4_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i8m8_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i16m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i32m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_i64m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m1_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m2_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m4_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u8m8_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u16m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) 
__riscv_th_vredsum_vs_u16m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u32m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredsum_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredsum_vs_u64m8_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m1_i8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m2_i8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m4_i8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m8_i8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m1_i16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m2_i16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m4_i16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m8_i16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m1_i32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m2_i32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m4_i32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m8_i32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m1_i64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m2_i64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m4_i64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m8_i64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m1_u8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m2_u8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1_tu(dest, vector, scalar, vl) +#define 
__riscv_vredxor_vs_u8m4_u8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m8_u8m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m1_u16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m2_u16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m4_u16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m8_u16m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m1_u32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m2_u32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m4_u32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m8_u32m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m1_u64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m2_u64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m4_u64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m8_u64m1(dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m1_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m2_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m4_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i8m8_i8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i16m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i32m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m1_i64m1_m(mask, dest, 
vector, scalar, vl) __riscv_th_vredxor_vs_i64m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_i64m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m1_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m2_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m4_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u8m8_u8m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u16m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u32m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vredxor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vredxor_vs_u64m8_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m1_i16m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m2_i16m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m4_i16m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m8_i16m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m1_i32m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m2_i32m1(dest, vector, scalar, vl) 
__riscv_th_vwredsum_vs_i16m2_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m4_i32m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m8_i32m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m1_i64m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m2_i64m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m4_i64m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m8_i64m1(dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m1_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m1_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m2_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m2_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m4_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m4_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i8m8_i16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i8m8_i16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m1_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m1_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m2_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m2_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m4_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m4_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i16m8_i32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i16m8_i32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m1_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m1_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m2_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m2_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m4_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m4_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsum_vs_i32m8_i64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsum_vs_i32m8_i64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m1_u16m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m2_u16m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m4_u16m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m8_u16m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m1_u32m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m2_u32m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m4_u32m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1_tu(dest, vector, scalar, vl) +#define 
__riscv_vwredsumu_vs_u16m8_u32m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m1_u64m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m2_u64m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m4_u64m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m8_u64m1(dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1_tu(dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m1_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m2_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m4_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u8m8_u16m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m1_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m2_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m4_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u16m8_u32m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m1_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m2_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m4_u64m1_tum(mask, dest, vector, scalar, vl) +#define __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, dest, vector, scalar, vl) __riscv_th_vwredsumu_vs_u32m8_u64m1_tum(mask, dest, vector, scalar, vl) }] in def th_vector_reduction_operations_wrapper_macros: RVVHeader; From f7461f21fdf801c0dcc1bb2535612c87ff387afa Mon Sep 17 00:00:00 2001 From: imkiva Date: Tue, 4 Jun 2024 16:04:47 +0800 Subject: [PATCH 11/12] [Clang][XTHeadVector] fix wrapper tests for vector-reduction (TUMU) --- .../vector-reduction/wrappers/vfredmax.c | 192 +++---- .../vector-reduction/wrappers/vfredmin.c | 192 +++---- .../vector-reduction/wrappers/vfredosum.c | 192 +++---- .../vector-reduction/wrappers/vfredsum.c | 192 +++---- .../vector-reduction/wrappers/vfwredosum.c | 128 ++--- .../vector-reduction/wrappers/vfwredsum.c | 128 ++--- .../vector-reduction/wrappers/vredand.c | 512 +++++++++--------- .../vector-reduction/wrappers/vredmax.c | 256 ++++----- .../vector-reduction/wrappers/vredmaxu.c | 256 ++++----- .../vector-reduction/wrappers/vredmin.c | 256 ++++----- .../vector-reduction/wrappers/vredminu.c | 256 ++++----- .../vector-reduction/wrappers/vredor.c | 512 +++++++++--------- .../vector-reduction/wrappers/vredsum.c | 512 +++++++++--------- .../vector-reduction/wrappers/vredxor.c | 512 +++++++++--------- 
.../vector-reduction/wrappers/vwredsum.c | 192 +++---- .../vector-reduction/wrappers/vwredsumu.c | 312 ++++------- 16 files changed, 2240 insertions(+), 2360 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c index 6abd88d24c710..845974d7f7b4c 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmax.c @@ -7,242 +7,242 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m1_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m1_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m1_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m2_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m2_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m2_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m4_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m4_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m4_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vfredmax_vs_f16m8_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m8_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m8_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m1_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m1_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m1_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m2_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m2_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m2_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m4_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { 
- return __riscv_vfredmax_vs_f32m4_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m4_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m8_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m8_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m8_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m1_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m1_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m1_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m2_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m2_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m2_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m4_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m4_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m4_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m8_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m8_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m8_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m1_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m2_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + 
return __riscv_vfredmax_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m4_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f16m8_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m1_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m2_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m4_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f32m8_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m1_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m2_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m4_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmax_vs_f64m8_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmax.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmax_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t 
vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmax_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c index 2b737e5ad2384..5f7c6014c4f6e 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredmin.c @@ -7,242 +7,242 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m1_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m1_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m1_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m2_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m2_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m2_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m4_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m4_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m4_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m8_f16m1 
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m8_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m8_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m1_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m1_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m1_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m2_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m2_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m2_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m4_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return 
__riscv_vfredmin_vs_f32m4_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m4_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m8_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m8_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m8_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m1_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m1_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m1_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m2_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m2_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m2_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m4_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m4_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m4_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m8_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m8_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m8_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m1_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m2_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return 
__riscv_vfredmin_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m4_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f16m8_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m1_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m2_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m4_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f32m8_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m1_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m2_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m4_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredmin_vs_f64m8_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredmin.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredmin_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, 
vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredmin_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c index c68aad592dd91..ea5d048e79217 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredosum.c @@ -7,241 +7,241 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m1_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m2_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m4_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1(maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m8_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m1_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m1_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m2_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m4_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // 
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m8_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m1_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m2_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m4_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m8_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m1_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m2_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t 
vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m4_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f16m8_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m1_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return 
__riscv_vfredosum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m2_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m4_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f32m8_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m1_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m2_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m4_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredosum_vs_f64m8_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) 
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredosum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredosum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredosum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c index 1f95790425566..f3b5557e900ee 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfredsum.c @@ -7,241 +7,241 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m1_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m1_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m1_f16m1(vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m1_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m2_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m2_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m2_f16m1(vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m2_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m4_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfredsum.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m4_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m4_f16m1(vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m4_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m8_f16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m8_f16m1(vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m8_f16m1(vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m8_f16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m1_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m1_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1(vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m1_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m2_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m2_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1(vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m2_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m4_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m4_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1(vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m4_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m8_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m8_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1(vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m8_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m1_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m1_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1(vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m1_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m2_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m2_f64m1(vector, scalar, 
vl); +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1(vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m2_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m4_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m4_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1(vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m4_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m8_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m8_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1(vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m8_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m1_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m1_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m2_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m2_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m4_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m4_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f16m8_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv4f16.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f16m8_f16m1_m(mask, vector, scalar, vl); +vfloat16m1_t test_vfredsum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m1_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 
7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m1_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m2_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m2_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m4_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m4_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f32m8_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv2f32.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f32m8_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfredsum_vs_f32m8_f32m1_m(vbool4_t mask, 
vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m1_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv1f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m1_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m2_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv2f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv2f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m2_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m4_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv4f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv4f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m4_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfredsum_vs_f64m8_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv8f64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfredsum.mask.nxv1f64.nxv8f64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfredsum_vs_f64m8_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfredsum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfredsum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c index 4dcf67987a631..c629fd09efca6 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredosum.c @@ -7,161 +7,161 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m1_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m2_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m4_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m8_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv2f32.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m1_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m2_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return 
__riscv_vfwredosum_vs_f32m2_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m4_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m8_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.nxv1f64.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m1_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m2_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m4_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f16m8_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m1_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m2_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m4_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredosum_vs_f32m8_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredosum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t 
mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredosum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c index 569718415ff7c..c0b075f8c3847 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vfwredsum.c @@ -7,161 +7,161 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m1_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m1_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1(vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m1_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m2_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m2_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1(vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m2_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m4_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return 
__riscv_vfwredsum_vs_f16m4_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1(vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m4_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m8_f32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv2f32.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m8_f32m1(vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1(vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m8_f32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m1_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m1_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1(vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m1_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m2_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m2_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1(vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m2_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m4_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfwredsum.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m4_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1(vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m4_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m8_f64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.nxv1f64.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m8_f64m1(vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1(vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m8_f64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m1_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv4f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv4f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m2_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv8f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv8f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, 
vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m4_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv16f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv16f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f16m8_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv32f16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv2f32.nxv32f16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, vector, scalar, vl); +vfloat32m1_t test_vfwredsum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m1_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv2f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv2f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vfwredsum_vs_f32m2_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv4f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv4f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m4_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv8f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv8f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwredsum_vs_f32m8_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv16f32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwredsum.mask.nxv1f64.nxv16f32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { - return __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, vector, scalar, vl); +vfloat64m1_t test_vfwredsum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) { + return __riscv_vfwredsum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c index e47ecafe91dde..9e8079359f76d 100644 --- 
a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredand.c
@@ -7,642 +7,642 @@
 #include <riscv_vector.h>
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m1_i8m1
-// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 8 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i8m1_i8m1(vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i8m1_i8m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m2_i8m1
-// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 16 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i8m2_i8m1(vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i8m2_i8m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m4_i8m1
-// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 32 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i8m4_i8m1(vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i8m4_i8m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vredand_vs_i8m8_i8m1
-// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF]], <vscale x 64 x i8> [[VECTOR]], <vscale x 8 x i8> [[SCALAR]], i64 [[VL]])
 //
CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i8m8_i8m1(vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i8m8_i8m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m1_i16m1
-// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 4 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i16m1_i16m1(vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i16m1_i16m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m2_i16m1
-// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 8 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i16m2_i16m1(vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i16m2_i16m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m4_i16m1
-// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF]], <vscale x 16 x i16> [[VECTOR]], <vscale x 4 x i16> [[SCALAR]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
- return __riscv_vredand_vs_i16m4_i16m1(vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+ return __riscv_vredand_vs_i16m4_i16m1(maskedoff, vector, scalar, vl);
 }
 // CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vredand_vs_i16m8_i16m1
-// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16>
poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1(maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m1_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m2_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m4_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t 
test_vredand_vs_u8m4_u8m1(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m8_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m1_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m2_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, 
size_t vl) { - return __riscv_vredand_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m4_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i8m8_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i16m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( 
[[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i32m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + 
return __riscv_vredand_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_i64m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: 
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m1_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m2_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m4_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - 
return __riscv_vredand_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u8m8_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m1_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u16m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = 
call @llvm.riscv.th.vredand.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u32m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t 
mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredand_vs_u64m8_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredand.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredand_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredand_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c index 
7d606b2dca6b9..de3ff5524703e 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmax.c @@ -7,322 +7,322 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m1_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m2_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m4_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m8_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], 
[[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m1_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m2_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m4_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m8_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmax.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return 
__riscv_vredmax_vs_i64m4_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m1_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m2_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m4_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i8m8_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, 
vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i16m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i32m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t 
test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmax_vs_i64m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmax.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmax_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmax_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git 
a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c index 7126641581948..9923c3f0f6c01 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmaxu.c @@ -7,322 +7,322 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m1_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m1_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m2_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m2_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m4_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m4_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m8_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m8_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vredmaxu_vs_u16m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return 
__riscv_vredmaxu_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m1_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m2_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m4_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u8m8_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m1_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u16m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t 
vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u32m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vredmaxu_vs_u64m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmaxu_vs_u64m8_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmaxu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredmaxu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c index 1f96c3c5a2aee..29251f32d589e 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredmin.c @@ -7,322 +7,322 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m1_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m1_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m2_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m2_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m4_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m4_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m8_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m8_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m1_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m1_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m2_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m2_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m4_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv16i16.i64( poison, 
[[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m4_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m8_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m2_i64m1(maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m4_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m1_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m2_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: 
ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m4_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i8m8_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vredmin_vs_i16m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i16m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], 
[[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i32m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i32m8_i32m1_m(mask, vector, scalar, vl); 
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredmin_vs_i64m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: 
( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredmin.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredmin_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredmin_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c index bcdb6ed8df7d2..ec8735dd7a8fb 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredminu.c @@ -7,322 +7,322 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m1_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m1_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m2_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m2_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m4_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredminu.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m4_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m8_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m8_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t 
maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m1_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m2_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m4_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u8m8_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m1_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u16m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return 
__riscv_vredminu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u32m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret 
[[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredminu_vs_u64m8_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredminu.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredminu_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredminu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c index 6e069dcb3405b..734221ba71108 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredor.c @@ -7,642 +7,642 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m1_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m1_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m2_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredor_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m2_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m4_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m4_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m8_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m8_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m1_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m1_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m2_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m2_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m4_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m4_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m8_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( poison, 
[[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m2_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m4_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m1_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m2_u8m1 
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m4_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m8_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { 
+ return __riscv_vredor_vs_u16m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t 
scalar, size_t vl) { - return __riscv_vredor_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredor.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m1_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m2_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m4_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i8m8_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return 
__riscv_vredor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i16m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] 
{ +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], 
[[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i32m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m2_i64m1_m(mask, maskedoff, vector, 
scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_i64m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m1_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m2_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], 
[[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m4_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u8m8_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m1_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, 
vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u16m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u32m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] 
// -vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vredor_vs_u64m8_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return __riscv_vredor_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return __riscv_vredor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c
index 5d1d120e1c858..a39fb395e10f4 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredsum.c
@@ -7,642 +7,642 @@
#include
// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m1_i8m1
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
- return __riscv_vredsum_vs_i8m1_i8m1(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+ return __riscv_vredsum_vs_i8m1_i8m1(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m2_i8m1
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
- return __riscv_vredsum_vs_i8m2_i8m1(vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+ return __riscv_vredsum_vs_i8m2_i8m1(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m4_i8m1
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m4_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m8_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m8_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m1_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m1_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m2_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m2_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: 
define dso_local @test_vredsum_vs_i16m4_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m4_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m8_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m2_i32m1(vector, scalar, vl); 
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m2_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m4_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m1_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m1_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m2_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m2_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m4_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m4_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m8_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m8_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return 
__riscv_vredsum_vs_u32m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t 
vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m1_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( poison, 
[[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m2_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m4_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i8m8_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t 
maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i16m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t 
test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i32m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define 
dso_local @test_vredsum_vs_i64m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_i64m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m1_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m2_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m4_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u8m8_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m1_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t 
mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u16m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u32m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], 
[[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + 
return __riscv_vredsum_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredsum_vs_u64m8_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredsum.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredsum_vs_u64m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredsum_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c index 50d04b461c05f..6d610ed11079b 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vredxor.c @@ -7,642 +7,642 @@ #include // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m1_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m1_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m1_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m2_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m2_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m2_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m4_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], 
[[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m4_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m4_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m8_i8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m8_i8m1(vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m8_i8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m1_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m1_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m1_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m2_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m2_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return 
__riscv_vredxor_vs_i16m2_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m4_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m4_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m4_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m8_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m8_i16m1(vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m1_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t 
scalar, size_t vl) { - return __riscv_vredxor_vs_i32m2_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m4_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m8_i32m1(vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m1_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m2_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m2_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m4_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m4_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m8_i64m1(vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m1_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m1_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m1_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m2_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: 
entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m2_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m2_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m4_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m4_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m4_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m8_u8m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m8_u8m1(vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m8_u8m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m8_u16m1(vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t maskedoff, vuint32m1_t vector, 
vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m8_u32m1(vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t 
test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m8_u64m1(vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m1_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m1_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m2_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m2_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m2_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m4_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m4_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m4_i8m1_m(mask, vector, scalar, vl); +vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i8m8_i8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m8_t vector, vint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i8m8_i8m1_m(mask, vector, scalar, vl); +vint8m1_t 
test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i16m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i16m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 
[[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i32m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i32m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m2_i64m1_m(mask, 
maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_i64m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_i64m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m1_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m1_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m2_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m2_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m4_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m4_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u8m8_u8m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv8i8.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u8m8_u8m1_m(mask, vector, scalar, vl); +vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m1_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { - return 
__riscv_vredxor_vs_u16m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u16m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv4i16.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u16m8_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u32m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv2i32.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u32m8_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv1i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv2i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vredxor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv4i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vredxor_vs_u64m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t 
test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+ return __riscv_vredxor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vredxor_vs_u64m8_u64m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vredxor.mask.nxv1i64.nxv8i64.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
- return __riscv_vredxor_vs_u64m8_u64m1_m(mask, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+ return __riscv_vredxor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c
index e42e2b07d3f9c..149b8165015a1 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsum.c
@@ -7,242 +7,242 @@ #include
// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m1_i16m1
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
- return __riscv_vwredsum_vs_i8m1_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
+ return __riscv_vwredsum_vs_i8m1_i16m1(maskedoff, vector, scalar, vl);
}
// CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m2_i16m1
-// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]])
// CHECK-RV64-NEXT: ret [[TMP0]]
//
-vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
- return __riscv_vwredsum_vs_i8m2_i16m1(vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
+ return
__riscv_vwredsum_vs_i8m2_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m4_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m4_i16m1(vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m4_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m8_i16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv4i16.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m8_i16m1(vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m8_i16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m1_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m1_i32m1(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m1_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m2_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t 
scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m2_i32m1(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m2_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m4_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m4_i32m1(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m4_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m8_i32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv2i32.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m8_i32m1(vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m8_i32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m1_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m1_i64m1(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m1_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m2_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m2_i64m1(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m2_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m4_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m4_i64m1(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m4_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m8_i64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.nxv1i64.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m8_i64m1(vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m8_i64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m1_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint8m1_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m1_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m1_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m2_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint8m2_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m2_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m2_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m4_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint8m4_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m4_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m4_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i8m8_i16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint8m8_t vector, vint16m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i8m8_i16m1_m(mask, vector, scalar, vl); +vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i8m8_i16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m1_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vwredsum.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint16m1_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m1_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m1_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m2_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint16m2_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m2_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m2_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m4_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint16m4_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m4_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m4_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i16m8_i32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint16m8_t vector, vint32m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i16m8_i32m1_m(mask, vector, scalar, vl); +vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, 
vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i16m8_i32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m1_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint32m1_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m1_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m1_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m2_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint32m2_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m2_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m2_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m4_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint32m4_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m4_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m4_i64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsum_vs_i32m8_i64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsum.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint32m8_t vector, vint64m1_t scalar, size_t vl) { - return __riscv_vwredsum_vs_i32m8_i64m1_m(mask, vector, scalar, vl); +vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) { + return __riscv_vwredsum_vs_i32m8_i64m1_m(mask, maskedoff, vector, scalar, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c index 6e64776e6cfc7..d830640cf4c56 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-reduction/wrappers/vwredsumu.c @@ -6,363 +6,243 @@ #include -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv1i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv2i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv4i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl); -} - // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m1_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t 
scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m1_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m2_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m2_u16m1(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m2_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m4_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m4_u16m1(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m4_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m8_u16m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv4i16.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m8_u16m1(vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv1i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf2_u32m1 -// 
CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv2i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m8_u16m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m1_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m1_u32m1(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m1_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m2_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m2_u32m1(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m2_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m4_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m4_u32m1(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m4_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local 
@test_vwredsumu_vs_u16m8_u32m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv2i32.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m8_u32m1(vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32mf2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv1i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m8_u32m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m1_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv2i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m1_u64m1(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m1_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m2_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m2_u64m1(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m2_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m4_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m4_u64m1(vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m4_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m8_u64m1 -// CHECK-RV64-SAME: ( [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.nxv1i64.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m8_u64m1(vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv1i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf8_u16m1_m(mask, vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv2i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf4_u16m1_m(mask, vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8mf2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv4i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8mf2_u16m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m8_u64m1(maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m1_u16m1_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv8i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv8i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m1_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m2_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv16i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv16i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m2_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m4_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv32i8.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv32i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m4_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u8m8_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv64i8.i64( poison, [[VECTOR]], 
[[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv4i16.nxv64i8.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv1i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16mf4_u32m1_m(mask, vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16mf2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv2i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16mf2_u32m1_m(mask, vector, scalar, vl); +vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u8m8_u16m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m1_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv4i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv4i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m1_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m2_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv8i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv8i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t 
test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m2_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m4_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv16i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv16i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m4_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u16m8_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv32i16.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv2i32.nxv32i16.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, vector, scalar, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32mf2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv1i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32mf2_u64m1_m(mask, vector, scalar, vl); +vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u16m8_u32m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m1_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv2i32.i64( poison, 
[[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv2i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m1_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m2_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv4i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv4i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m2_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m4_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv8i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv8i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { - return __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m4_u64m1_m(mask, maskedoff, vector, scalar, vl); } // CHECK-RV64-LABEL: define dso_local @test_vwredsumu_vs_u32m8_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[VECTOR:%.*]], [[SCALAR:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv16i32.i64( poison, [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vwredsumu.mask.nxv1i64.nxv16i32.i64( [[MASKEDOFF]], [[VECTOR]], [[SCALAR]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint32m8_t vector, vuint64m1_t scalar, size_t 
vl) { - return __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, vector, scalar, vl); +vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) { + return __riscv_vwredsumu_vs_u32m8_u64m1_m(mask, maskedoff, vector, scalar, vl); } From 5a2db6b69cc7b67fff5e1fbb839f19bf5de97298 Mon Sep 17 00:00:00 2001 From: imkiva Date: Wed, 5 Jun 2024 13:56:39 +0800 Subject: [PATCH 12/12] [Clang][XTHeadVector] fix wrapper tests for vector-floating (TAMU) --- .../vector-floating/wrappers/vfabs.c | 96 ++++---- .../vector-floating/wrappers/vfadd.c | 192 ++++++++-------- .../vector-floating/wrappers/vfclass.c | 96 ++++---- .../vector-floating/wrappers/vfdiv.c | 192 ++++++++-------- .../vector-floating/wrappers/vfmax.c | 192 ++++++++-------- .../vector-floating/wrappers/vfmin.c | 192 ++++++++-------- .../vector-floating/wrappers/vfmul.c | 192 ++++++++-------- .../vector-floating/wrappers/vfneg.c | 96 ++++---- .../vector-floating/wrappers/vfrdiv.c | 96 ++++---- .../vector-floating/wrappers/vfrsub.c | 96 ++++---- .../vector-floating/wrappers/vfsgnj.c | 192 ++++++++-------- .../vector-floating/wrappers/vfsgnjn.c | 192 ++++++++-------- .../vector-floating/wrappers/vfsgnjx.c | 193 ++++++++-------- .../vector-floating/wrappers/vfsqrt.c | 96 ++++---- .../vector-floating/wrappers/vfsub.c | 192 ++++++++-------- .../vector-floating/wrappers/vfwadd.c | 208 +++++++++--------- .../vector-floating/wrappers/vfwmul.c | 98 ++++----- .../vector-floating/wrappers/vfwsub.c | 208 +++++++++--------- .../vector-floating/wrappers/vmfeq.c | 192 ++++++++-------- .../vector-floating/wrappers/vmfge.c | 192 ++++++++-------- .../vector-floating/wrappers/vmfgt.c | 192 ++++++++-------- .../vector-floating/wrappers/vmfle.c | 193 ++++++++-------- .../vector-floating/wrappers/vmflt.c | 193 ++++++++-------- .../vector-floating/wrappers/vmfne.c | 192 ++++++++-------- 24 files changed, 1980 insertions(+), 1993 deletions(-) diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfabs.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfabs.c index a4dda64f742d0..94d666d5d3e3c 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfabs.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfabs.c @@ -128,122 +128,122 @@ vfloat64m8_t test_vfabs_v_f64m8(vfloat64m8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfabs_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfabs_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return __riscv_vfabs_v_f16m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfabs_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfabs_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return __riscv_vfabs_v_f16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfabs_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfabs_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return __riscv_vfabs_v_f16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfabs_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfabs_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return __riscv_vfabs_v_f16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfabs_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfabs_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return __riscv_vfabs_v_f32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfabs_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfabs_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return __riscv_vfabs_v_f32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfabs_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfabs_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return __riscv_vfabs_v_f32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfabs_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfabs_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return __riscv_vfabs_v_f32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfabs_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfabs_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return __riscv_vfabs_v_f64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f64m2_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfabs_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfabs_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return __riscv_vfabs_v_f64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfabs_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfabs_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return __riscv_vfabs_v_f64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfabs_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfabs_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfabs_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return __riscv_vfabs_v_f64m8_m(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfadd.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfadd.c index d351083b68509..c7641b38ce6f3 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfadd.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfadd.c @@ -248,241 +248,241 @@ vfloat64m8_t test_vfadd_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], 
[[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfadd_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfadd_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfadd_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfadd_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfadd_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfadd_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m2_m(mask, maskedoff, op1, 
op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfadd_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfadd_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfadd_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfadd_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv32f16.f16.i64( 
[[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfadd_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfadd_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfadd_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfadd_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfadd_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfadd_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t 
op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfadd_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfadd_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfadd_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfadd_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t 
test_vfadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfadd_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfadd_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfadd.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfadd_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfclass.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfclass.c index eaed3dda8faf6..e1ce01e251be9 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfclass.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfclass.c @@ -128,122 +128,122 @@ vuint64m8_t test_vfclass_v_u64m8(vfloat64m8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv4f16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfclass_v_u16m1_m(mask, op1, vl); +vuint16m1_t test_vfclass_v_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return __riscv_vfclass_v_u16m1_m(mask, maskedoff, op1, vl); 
} // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv8f16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfclass_v_u16m2_m(mask, op1, vl); +vuint16m2_t test_vfclass_v_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return __riscv_vfclass_v_u16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv16f16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfclass_v_u16m4_m(mask, op1, vl); +vuint16m4_t test_vfclass_v_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return __riscv_vfclass_v_u16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv32f16.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfclass_v_u16m8_m(mask, op1, vl); +vuint16m8_t test_vfclass_v_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return __riscv_vfclass_v_u16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv2f32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfclass_v_u32m1_m(mask, op1, vl); +vuint32m1_t test_vfclass_v_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return __riscv_vfclass_v_u32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u32m2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv4f32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfclass_v_u32m2_m(mask, op1, vl); +vuint32m2_t test_vfclass_v_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return __riscv_vfclass_v_u32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv8f32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfclass_v_u32m4_m(mask, op1, vl); +vuint32m4_t test_vfclass_v_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return __riscv_vfclass_v_u32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv16f32.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfclass_v_u32m8_m(mask, op1, vl); +vuint32m8_t test_vfclass_v_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return __riscv_vfclass_v_u32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv1f64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfclass_v_u64m1_m(mask, op1, vl); +vuint64m1_t test_vfclass_v_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return __riscv_vfclass_v_u64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv2f64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfclass_v_u64m2_m(mask, op1, vl); +vuint64m2_t test_vfclass_v_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return __riscv_vfclass_v_u64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv4f64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfclass_v_u64m4_m(mask, op1, vl); +vuint64m4_t test_vfclass_v_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return __riscv_vfclass_v_u64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfclass_v_u64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv8f64.i64( poison, [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfclass.mask.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfclass_v_u64m8_m(mask, op1, vl); +vuint64m8_t test_vfclass_v_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return __riscv_vfclass_v_u64m8_m(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfdiv.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfdiv.c index b1550febcf3b5..73eaf4f74ba6d 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfdiv.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfdiv.c @@ -248,242 +248,242 @@ vfloat64m8_t test_vfdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfdiv_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfdiv_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfdiv_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfdiv_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfdiv_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfdiv_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfdiv_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfdiv_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfdiv_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfdiv_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfdiv_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t 
test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfdiv_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfdiv_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfdiv_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfdiv_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfdiv_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfdiv_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float 
noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfdiv_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfdiv_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfdiv_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfdiv_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfdiv_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t 
test_vfdiv_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfdiv_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfdiv_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfdiv_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfdiv_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfdiv_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfdiv.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfdiv_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfdiv_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfdiv_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfdiv_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfdiv_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfdiv_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfdiv_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double 
op2, size_t vl) {
+ return __riscv_vfdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vv_f64m8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return __riscv_vfdiv_vv_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return __riscv_vfdiv_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfdiv_vf_f64m8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return __riscv_vfdiv_vf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+ return __riscv_vfdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmax.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmax.c
index 52ec1cd6498c9..a341c98fcf6e1 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmax.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmax.c
@@ -248,242 +248,242 @@ vfloat64m8_t test_vfmax_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return __riscv_vfmax_vv_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfmax_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmax_vv_f16m1_m(mask,
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmax_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmax_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmax_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmax_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfmax_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmax_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmax_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmax_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], 
i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmax_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfmax_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmax_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmax_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmax_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmax_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfmax_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmax_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmax_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmax_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmax_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfmax_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfmax_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmax_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfmax_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmax_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfmax_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfmax_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t 
test_vfmax_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfmax_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmax_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfmax_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfmax_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmax_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfmax_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmax_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmax_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfmax_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], 
[[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfmax_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmax_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfmax_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmax_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfmax_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfmax_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmax_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfmax_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmax_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfmax_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f64m2_m -// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfmax_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmax_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfmax_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmax_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmax_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfmax_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfmax_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmax_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfmax_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmax_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t 
test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
- return __riscv_vfmax_vv_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfmax_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+ return __riscv_vfmax_vv_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmax_vf_f64m8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmax.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return __riscv_vfmax_vf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfmax_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+ return __riscv_vfmax_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmin.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmin.c
index 6480296e5ff07..401b16f043e07 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmin.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmin.c
@@ -248,242 +248,242 @@ vfloat64m8_t test_vfmin_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return __riscv_vfmin_vv_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfmin_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmin_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16
op2, size_t vl) { - return __riscv_vfmin_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfmin_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmin_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmin_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfmin_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmin_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmin_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmin_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmin_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfmin_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmin_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmin_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmin_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmin_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfmin_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmin_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmin_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmin_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmin_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfmin_vv_f32m1_m(mask, maskedoff, 
op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfmin_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmin_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfmin_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmin_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfmin_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfmin_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmin_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfmin_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmin_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfmin_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfmin_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmin_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfmin_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmin_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfmin_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfmin_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmin_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfmin_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmin_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfmin_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfmin_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmin_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfmin_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmin_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfmin_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfmin_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t 
test_vfmin_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfmin_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmin_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfmin_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfmin_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmin_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfmin_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmin_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmin_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfmin_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmin_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f64.f64.i64( poison, [[OP1]], double 
[[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmin.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
- return __riscv_vfmin_vf_f64m8_m(mask, op1, op2, vl);
+vfloat64m8_t test_vfmin_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+ return __riscv_vfmin_vf_f64m8_m(mask, maskedoff, op1, op2, vl);
 }
diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmul.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmul.c
index ed8ffb44722b8..87de777767df4 100644
--- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmul.c
+++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfmul.c
@@ -248,241 +248,241 @@ vfloat64m8_t test_vfmul_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) {
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
- return __riscv_vfmul_vv_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfmul_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
+ return __riscv_vfmul_vv_f16m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f16m1_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
- return __riscv_vfmul_vf_f16m1_m(mask, op1, op2, vl);
+vfloat16m1_t test_vfmul_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
+ return __riscv_vfmul_vf_f16m1_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] =
call @llvm.riscv.th.vfmul.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmul_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfmul_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmul_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfmul_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmul_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmul_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfmul_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmul_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfmul_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmul_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f16m8_m 
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmul_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfmul_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfmul_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfmul_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfmul_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmul_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfmul_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfmul_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfmul_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfmul_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfmul_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfmul_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfmul_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return 
__riscv_vfmul_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmul_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfmul_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfmul_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfmul_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfmul_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfmul_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfmul_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfmul_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfmul_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfmul_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfmul.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfmul_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfmul_vf_f64m8_m(vbool8_t mask, vfloat64m8_t 
maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfmul_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfneg.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfneg.c index d335724fef9d6..9d8da3c2fac32 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfneg.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfneg.c @@ -128,122 +128,122 @@ vfloat64m8_t test_vfneg_v_f64m8(vfloat64m8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfneg_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfneg_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return __riscv_vfneg_v_f16m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfneg_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfneg_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return __riscv_vfneg_v_f16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfneg_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfneg_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return __riscv_vfneg_v_f16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 
noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfneg_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfneg_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return __riscv_vfneg_v_f16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfneg_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfneg_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return __riscv_vfneg_v_f32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfneg_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfneg_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return __riscv_vfneg_v_f32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfneg_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfneg_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return __riscv_vfneg_v_f32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfneg_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfneg_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return __riscv_vfneg_v_f32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfneg_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfneg_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return __riscv_vfneg_v_f64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfneg_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfneg_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return __riscv_vfneg_v_f64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfneg_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfneg_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return __riscv_vfneg_v_f64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfneg_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP1]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfneg_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfneg_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return __riscv_vfneg_v_f64m8_m(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrdiv.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrdiv.c index da53a09e8076b..ce8f02dca457a 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrdiv.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrdiv.c @@ -128,121 +128,121 @@ vfloat64m8_t test_vfrdiv_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfrdiv_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrdiv_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfrdiv_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrdiv_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfrdiv_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrdiv_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrdiv_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfrdiv_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrdiv_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfrdiv_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfrdiv_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, 
vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfrdiv_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfrdiv_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfrdiv_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfrdiv_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfrdiv_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfrdiv_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfrdiv_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfrdiv_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfrdiv_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], 
[[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfrdiv_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfrdiv_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfrdiv_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfrdiv_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrdiv_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrdiv.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfrdiv_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfrdiv_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfrdiv_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrsub.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrsub.c index 2cdac3cf9f208..5b13d0a9c1e7a 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrsub.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfrsub.c @@ -128,121 +128,121 @@ vfloat64m8_t test_vfrsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrsub_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfrsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrsub_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfrsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrsub_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfrsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t 
mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfrsub_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfrsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfrsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfrsub_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfrsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfrsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfrsub_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfrsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfrsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfrsub_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfrsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfrsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfrsub_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfrsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfrsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfrsub_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfrsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfrsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfrsub_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfrsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfrsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t 
mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfrsub_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfrsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfrsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfrsub_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfrsub.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfrsub_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfrsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfrsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnj.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnj.c index b97f6372cdf46..8b338c2af2753 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnj.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnj.c @@ -248,242 +248,242 @@ vfloat64m8_t test_vfsgnj_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 
op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnj_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnj_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnj_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnj_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnj_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnj_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnj_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnj_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnj_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + 
return __riscv_vfsgnj_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnj_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfsgnj_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnj_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfsgnj_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfsgnj.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnj_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfsgnj_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfsgnj_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnj_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfsgnj_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f64m1_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnj_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfsgnj_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t 
test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnj_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfsgnj_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnj_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfsgnj_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnj_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfsgnj_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnj_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnj.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfsgnj_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnj_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfsgnj_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjn.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjn.c index 3cf366024df9d..57a89b8f66e1c 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjn.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjn.c @@ -248,242 +248,242 @@ vfloat64m8_t test_vfsgnjn_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjn_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjn_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjn_vf_f16m4_m(vbool4_t mask, 
vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjn_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjn_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjn_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f32.f32.i64( poison, 
[[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnjn_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfsgnjn_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjn_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfsgnjn_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjn_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfsgnjn_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfsgnjn_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjn_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfsgnjn_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], 
[[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjn_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfsgnjn_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjn_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfsgnjn_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], 
i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjn_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfsgnjn_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjn_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfsgnjn_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjn_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjn.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t 
test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfsgnjn_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjn_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfsgnjn_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjx.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjx.c index adb3fc5fba7e3..9bdc4d51869ec 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjx.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsgnjx.c @@ -248,242 +248,241 @@ vfloat64m8_t test_vfsgnjx_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsgnjx_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, 
vfloat16m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsgnjx_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsgnjx_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsgnjx_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsgnjx_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsgnjx_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsgnjx_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t 
test_vfsgnjx_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfsgnjx_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsgnjx_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfsgnjx_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfsgnjx.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsgnjx_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfsgnjx_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfsgnjx_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsgnjx_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfsgnjx_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return 
__riscv_vfsgnjx_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsgnjx_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfsgnjx_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsgnjx_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfsgnjx_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfsgnjx.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsgnjx_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfsgnjx_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsgnjx_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfsgnjx_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsgnjx_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsgnjx.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfsgnjx_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsgnjx_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfsgnjx_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } - diff --git 
a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsqrt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsqrt.c index a24db296850b8..d41bd6c3d2d2d 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsqrt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsqrt.c @@ -128,121 +128,121 @@ vfloat64m8_t test_vfsqrt_v_f64m8(vfloat64m8_t op1, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv4f16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m1_m(mask, op1, vl); +vfloat16m1_t test_vfsqrt_v_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, size_t vl) { + return __riscv_vfsqrt_v_f16m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv8f16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m2_m(mask, op1, vl); +vfloat16m2_t test_vfsqrt_v_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, size_t vl) { + return __riscv_vfsqrt_v_f16m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv16f16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m4_m(mask, op1, vl); +vfloat16m4_t test_vfsqrt_v_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, size_t vl) { + return __riscv_vfsqrt_v_f16m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv32f16.i64( poison, [[OP1]], [[MASK]], i64 7, i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f16m8_m(mask, op1, vl); +vfloat16m8_t test_vfsqrt_v_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, size_t vl) { + return __riscv_vfsqrt_v_f16m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv2f32.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m1_m(mask, op1, vl); +vfloat32m1_t test_vfsqrt_v_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, size_t vl) { + return __riscv_vfsqrt_v_f32m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv4f32.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m2_m(mask, op1, vl); +vfloat32m2_t test_vfsqrt_v_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, size_t vl) { + return __riscv_vfsqrt_v_f32m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv8f32.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m4_m(mask, op1, vl); +vfloat32m4_t test_vfsqrt_v_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, size_t vl) { + return __riscv_vfsqrt_v_f32m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv16f32.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f32m8_m(mask, op1, vl); +vfloat32m8_t test_vfsqrt_v_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, size_t vl) { + return __riscv_vfsqrt_v_f32m8_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv1f64.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m1_m(mask, op1, vl); +vfloat64m1_t test_vfsqrt_v_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, size_t vl) { + return __riscv_vfsqrt_v_f64m1_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv2f64.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m2_m(mask, op1, vl); +vfloat64m2_t test_vfsqrt_v_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, size_t vl) { + return __riscv_vfsqrt_v_f64m2_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv4f64.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m4_m(mask, op1, vl); +vfloat64m4_t test_vfsqrt_v_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, size_t vl) { + return __riscv_vfsqrt_v_f64m4_m(mask, maskedoff, op1, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsqrt_v_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsqrt.mask.nxv8f64.i64( poison, [[OP1]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfsqrt.mask.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t op1, size_t vl) { - return __riscv_vfsqrt_v_f64m8_m(mask, op1, vl); +vfloat64m8_t test_vfsqrt_v_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, size_t vl) { + return __riscv_vfsqrt_v_f64m8_m(mask, maskedoff, op1, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsub.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsub.c index deb64dddfc377..e26023728eb6d 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsub.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfsub.c @@ -248,241 +248,241 @@ vfloat64m8_t test_vfsub_vf_f64m8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsub_vv_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfsub_vv_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f16m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsub_vf_f16m1_m(mask, op1, op2, vl); +vfloat16m1_t test_vfsub_vf_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsub_vf_f16m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // 
CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsub_vv_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfsub_vv_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f16m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsub_vf_f16m2_m(mask, op1, op2, vl); +vfloat16m2_t test_vfsub_vf_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsub_vf_f16m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsub_vv_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfsub_vv_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f16m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsub_vf_f16m4_m(mask, op1, op2, vl); +vfloat16m4_t test_vfsub_vf_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsub_vf_f16m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// 
CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsub_vv_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vfsub_vv_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f16m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfsub_vf_f16m8_m(mask, op1, op2, vl); +vfloat16m8_t test_vfsub_vf_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfsub_vf_f16m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfsub_vv_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, 
float op2, size_t vl) { - return __riscv_vfsub_vf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfsub_vf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfsub_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfsub_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vfsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vfsub_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vfsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, 
vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vfsub_vv_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vfsub_vf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vfsub_vf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vfsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vfsub_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vfsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 
7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vfsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vfsub_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vfsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vfsub_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vfsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfsub_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfsub.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vfsub_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vfsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff 
--git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwadd.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwadd.c index 883e65a6252e9..01c01eab17eaa 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwadd.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwadd.c @@ -268,261 +268,261 @@ vfloat64m8_t test_vfwadd_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv2f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv2f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_wf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwadd_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_wf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfwadd_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_vf_f32m2_m(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f32.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfwadd_wv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_wf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwadd_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_wf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfwadd_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f32.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfwadd_wv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_wf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwadd_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_wf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfwadd_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv16f32.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfwadd_wv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv16f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv16f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwadd_wf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwadd_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwadd_wf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv1f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv1f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { - return __riscv_vfwadd_wf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwadd_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { + return __riscv_vfwadd_wf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfwadd_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfwadd_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfwadd_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv2f64.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfwadd_wv_f64m2_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv2f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv2f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { - return __riscv_vfwadd_wf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwadd_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { + return __riscv_vfwadd_wf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfwadd_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfwadd_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfwadd_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f64.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vfwadd.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfwadd_wv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv4f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { - return __riscv_vfwadd_wf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwadd_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { + return __riscv_vfwadd_wf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfwadd_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfwadd_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vfwadd_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfwadd_wv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f64.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwadd_wv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfwadd_wv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwadd_wf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwadd.w.mask.nxv8f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { - return __riscv_vfwadd_wf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwadd_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { + return __riscv_vfwadd_wf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwmul.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwmul.c index dad32f157cbdd..8147ff7171209 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwmul.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwmul.c @@ -128,121 +128,111 @@ vfloat64m8_t test_vfwmul_vf_f64m8(vfloat32m4_t op1, float op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwmul_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfwmul_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // 
CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwmul_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwmul_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwmul_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwmul_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfwmul_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwmul_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwmul_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwmul_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.th.vfwmul.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwmul_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfwmul_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwmul_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwmul_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwmul_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwmul_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfwmul_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfwmul_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwmul_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfwmul_vf_f64m2_m(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwmul_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfwmul_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfwmul_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwmul_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfwmul_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwmul_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwmul_vv_f64m8_m(mask, op1, op2, vl); -} - -// CHECK-RV64-LABEL: define dso_local @test_vfwmul_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { -// CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwmul.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) -// CHECK-RV64-NEXT: ret [[TMP0]] -// -vfloat64m8_t test_vfwmul_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfwmul_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwmul_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + 
return __riscv_vfwmul_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwsub.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwsub.c index 637f0ef5530eb..ca37622725065 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwsub.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vfwsub.c @@ -268,261 +268,261 @@ vfloat64m8_t test_vfwsub_wf_f64m8(vfloat64m8_t op1, float op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f32m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv2f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv2f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_wf_f32m1_m(mask, op1, op2, vl); +vfloat32m1_t test_vfwsub_wf_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwsub_wf_f32m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f32.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfwsub_vv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f32.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_vf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat16m1_t op1, 
_Float16 op2, size_t vl) { + return __riscv_vfwsub_vf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f32.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f32.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_wv_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vfwsub_wv_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f32m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_wf_f32m2_m(mask, op1, op2, vl); +vfloat32m2_t test_vfwsub_wf_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwsub_wf_f32m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f32.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfwsub_vv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( poison, 
[[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f32.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_vf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwsub_vf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f32.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f32.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_wv_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vfwsub_wv_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f32m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_wf_f32m4_m(mask, op1, op2, vl); +vfloat32m4_t test_vfwsub_wf_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwsub_wf_f32m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv16f32.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return 
__riscv_vfwsub_vv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv16f32.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_vf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwsub_vf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv16f32.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv16f32.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_wv_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vfwsub_wv_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f32m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv16f32.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv16f32.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vfwsub_wf_f32m8_m(mask, op1, op2, vl); +vfloat32m8_t test_vfwsub_wf_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vfwsub_wf_f32m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f64m1_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv1f64.f32.i64( poison, [[OP1]], float 
[[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv1f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t op1, float op2, size_t vl) { - return __riscv_vfwsub_wf_f64m1_m(mask, op1, op2, vl); +vfloat64m1_t test_vfwsub_wf_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, float op2, size_t vl) { + return __riscv_vfwsub_wf_f64m1_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv2f64.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vfwsub_vv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv2f64.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vfwsub_vf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vfwsub_vf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv2f64.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv2f64.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_wv_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat32m1_t op2, size_t vl) { + return 
__riscv_vfwsub_wv_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f64m2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv2f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv2f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t op1, float op2, size_t vl) { - return __riscv_vfwsub_wf_f64m2_m(mask, op1, op2, vl); +vfloat64m2_t test_vfwsub_wf_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, float op2, size_t vl) { + return __riscv_vfwsub_wf_f64m2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f64.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfwsub_vv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv4f64.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vfwsub_vf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vfwsub_vf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f64.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 
[[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f64.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_wv_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vfwsub_wv_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f64m4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv4f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t op1, float op2, size_t vl) { - return __riscv_vfwsub_wf_f64m4_m(mask, op1, op2, vl); +vfloat64m4_t test_vfwsub_wf_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, float op2, size_t vl) { + return __riscv_vfwsub_wf_f64m4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f64.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_vv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfwsub_vv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_vf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.mask.nxv8f64.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vfwsub_vf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return 
__riscv_vfwsub_vf_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wv_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f64.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f64.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vfwsub_wv_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_wv_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vfwsub_wv_f64m8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vfwsub_wf_f64m8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f64.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vfwsub.w.mask.nxv8f64.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 7, i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t op1, float op2, size_t vl) { - return __riscv_vfwsub_wf_f64m8_m(mask, op1, op2, vl); +vfloat64m8_t test_vfwsub_wf_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, float op2, size_t vl) { + return __riscv_vfwsub_wf_f64m8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfeq.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfeq.c index bc8c611c22354..0163cab6c49c3 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfeq.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfeq.c @@ -248,242 +248,242 @@ vbool8_t test_vmfeq_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_f16m1_b16_m(mask, 
maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfeq_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfeq_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vmfeq_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfeq_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfeq_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], 
[[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfeq_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfeq_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfeq_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfeq_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfeq_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfeq_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { 
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vmfeq_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vmfeq_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vmfeq_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - 
return __riscv_vmfeq_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vmfeq_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vmfeq_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vmfeq_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] 
= call @llvm.riscv.th.vmfeq.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vmfeq_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfeq_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vmfeq_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vmfeq_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vmfeq_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfeq_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vmfeq_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return 
__riscv_vmfeq_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vmfeq_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfeq_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vmfeq_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vmfeq_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vmfeq_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfeq_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vmfeq_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vv_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmfeq.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfeq_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vmfeq_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfeq_vf_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfeq.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vmfeq_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfeq_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vmfeq_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfge.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfge.c index dbc4fe851c946..998e0beb015db 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfge.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfge.c @@ -248,242 +248,242 @@ vbool8_t test_vmfge_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vmfge_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f16.f16.i64( 
[[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfge_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfge_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vmfge_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfge_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfge_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vmfge_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) 
#[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfge_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfge_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfge_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vmfge_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfge_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfge_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfge_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return 
__riscv_vmfge_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vmfge_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vmfge_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vmfge_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vmfge_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vmfge_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vmfge_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// 
CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vmfge_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vmfge_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vmfge_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vmfge_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vmfge_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfge_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return 
__riscv_vmfge_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vmfge_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vmfge_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfge_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vmfge_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vmfge_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmfge.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vmfge_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfge_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vmfge_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vmfge_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vmfge_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfge_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vmfge_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vv_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfge_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vmfge_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfge_vf_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfge.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vmfge_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfge_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vmfge_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfgt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfgt.c index 400c801b30e09..6aa380bdb32aa 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfgt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfgt.c @@ -248,242 +248,242 @@ vbool8_t test_vmfgt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfgt_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfgt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef 
[[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfgt_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfgt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - 
return __riscv_vmfgt_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfgt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfgt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfgt_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfgt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfgt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vmfgt_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vmfgt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vmfgt_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vmfgt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return 
__riscv_vmfgt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vmfgt_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vmfgt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vmfgt_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfgt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vmfgt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv1f64.nxv1f64.i64( 
[[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vmfgt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vmfgt_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfgt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vmfgt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vmfgt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vmfgt_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfgt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vmfgt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], 
[[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vmfgt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vmfgt_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfgt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vmfgt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vv_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfgt_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vmfgt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfgt_vf_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfgt.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t 
test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vmfgt_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfgt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vmfgt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfle.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfle.c index 56451cf7df191..cf692a59dd21b 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfle.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfle.c @@ -248,242 +248,241 @@ vbool8_t test_vmfle_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfle_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfle_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vmfle_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfle_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfle_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfle_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t 
vl) { - return __riscv_vmfle_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfle_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vmfle_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfle_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfle_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfle_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmfle_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfle_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vmfle_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfle_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfle_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfle_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: 
[[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmfle_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfle_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { + return __riscv_vmfle_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfle_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmfle_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfle_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmfle_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfle_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vmfle_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vmfle_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfle_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + 
return __riscv_vmfle_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmfle_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfle_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vmfle_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vmfle_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfle_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vmfle_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmfle_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfle_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vmfle_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmfle.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vmfle_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfle_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vmfle_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmfle_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfle_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vmfle_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vmfle_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmfle_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vmfle_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfle_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vmfle_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef 
[[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vmfle_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmfle_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vmfle_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfle_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vmfle_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vmfle_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmfle_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vmfle_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, 
vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfle_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vmfle_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vmfle_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfle_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vmfle_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vv_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmfle_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfle_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vmfle_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfle_vf_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfle.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vmfle_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfle_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vmfle_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } - diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmflt.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmflt.c index aef6a3f703253..fe92e4c0092b0 100644 --- 
a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmflt.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmflt.c @@ -248,242 +248,241 @@ vbool8_t test_vmflt_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmflt_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vmflt_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vmflt_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmflt_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vmflt_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmflt_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vmflt_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vmflt_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmflt_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) { + return __riscv_vmflt_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmflt_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) { + return __riscv_vmflt_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f16m4_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) { - return __riscv_vmflt_vf_f16m4_b4_m(mask, op1, op2, vl); +vbool4_t test_vmflt_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) { + return __riscv_vmflt_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { - return __riscv_vmflt_vv_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmflt_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) { 
+ return __riscv_vmflt_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f16m8_b2_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) { - return __riscv_vmflt_vf_f16m8_b2_m(mask, op1, op2, vl); +vbool2_t test_vmflt_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) { + return __riscv_vmflt_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmflt_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) { + return __riscv_vmflt_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f32m1_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) { - return __riscv_vmflt_vf_f32m1_b32_m(mask, op1, op2, vl); +vbool32_t test_vmflt_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) { + return __riscv_vmflt_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call 
@llvm.riscv.th.vmflt.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmflt_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) { + return __riscv_vmflt_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f32m2_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) { - return __riscv_vmflt_vf_f32m2_b16_m(mask, op1, op2, vl); +vbool16_t test_vmflt_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) { + return __riscv_vmflt_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmflt_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) { + return __riscv_vmflt_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f32m4_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) { - return __riscv_vmflt_vf_f32m4_b8_m(mask, op1, op2, vl); +vbool8_t test_vmflt_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) { + return __riscv_vmflt_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f32m8_b4_m -// CHECK-RV64-SAME: ( 
[[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { - return __riscv_vmflt_vv_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmflt_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) { + return __riscv_vmflt_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f32m8_b4_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) { - return __riscv_vmflt_vf_f32m8_b4_m(mask, op1, op2, vl); +vbool4_t test_vmflt_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) { + return __riscv_vmflt_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmflt_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) { + return __riscv_vmflt_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f64m1_b64_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool64_t 
test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) { - return __riscv_vmflt_vf_f64m1_b64_m(mask, op1, op2, vl); +vbool64_t test_vmflt_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) { + return __riscv_vmflt_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmflt_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) { + return __riscv_vmflt_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f64m2_b32_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) { - return __riscv_vmflt_vf_f64m2_b32_m(mask, op1, op2, vl); +vbool32_t test_vmflt_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) { + return __riscv_vmflt_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmflt_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) { + return __riscv_vmflt_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f64m4_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], 
double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) { - return __riscv_vmflt_vf_f64m4_b16_m(mask, op1, op2, vl); +vbool16_t test_vmflt_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) { + return __riscv_vmflt_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vv_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { - return __riscv_vmflt_vv_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmflt_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) { + return __riscv_vmflt_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmflt_vf_f64m8_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmflt.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) { - return __riscv_vmflt_vf_f64m8_b8_m(mask, op1, op2, vl); +vbool8_t test_vmflt_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) { + return __riscv_vmflt_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl); } - diff --git a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfne.c b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfne.c index 8d1dcd24ee7e4..8e0103c0baba3 100644 --- a/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfne.c +++ b/clang/test/CodeGen/RISCV/rvv0p71-intrinsics-handcrafted/vector-floating/wrappers/vmfne.c @@ -248,242 +248,242 @@ vbool8_t test_vmfne_vf_f64m8_b8(vfloat64m8_t op1, double op2, size_t vl) { } // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // 
CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f16.nxv4f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f16.nxv4f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { - return __riscv_vmfne_vv_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfne_vv_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) { + return __riscv_vmfne_vv_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f16m1_b16_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfne_vf_f16m1_b16_m(mask, op1, op2, vl); +vbool16_t test_vmfne_vf_f16m1_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) { + return __riscv_vmfne_vf_f16m1_b16_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f16.nxv8f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f16.nxv8f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { - return __riscv_vmfne_vv_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfne_vv_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) { + return __riscv_vmfne_vv_f16m2_b8_m(mask, maskedoff, op1, op2, vl); } // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f16m2_b8_m -// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { +// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] { // CHECK-RV64-NEXT: entry: -// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) +// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]]) // CHECK-RV64-NEXT: ret [[TMP0]] // -vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) { - return __riscv_vmfne_vf_f16m2_b8_m(mask, op1, op2, vl); +vbool8_t test_vmfne_vf_f16m2_b8_m(vbool8_t mask, vbool8_t maskedoff, 
vfloat16m2_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vmfne_vf_f16m2_b8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f16m4_b4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f16.nxv16f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f16.nxv16f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vv_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f16m4_b4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m4_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vf_f16m4_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vmfne_vf_f16m4_b4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f16m8_b2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv32f16.nxv32f16.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv32f16.nxv32f16.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmfne_vv_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f16m8_b2_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], half noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv32f16.f16.i64( poison, [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv32f16.f16.i64( [[MASKEDOFF]], [[OP1]], half [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return __riscv_vmfne_vf_f16m8_b2_m(mask, op1, op2, vl);
+vbool2_t test_vmfne_vf_f16m8_b2_m(vbool2_t mask, vbool2_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
+  return __riscv_vmfne_vf_f16m8_b2_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f32m1_b32_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f32.nxv2f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f32.nxv2f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vv_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f32m1_b32_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m1_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vf_f32m1_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
+  return __riscv_vmfne_vf_f32m1_b32_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f32m2_b16_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f32.nxv4f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f32.nxv4f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vv_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f32m2_b16_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m2_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vf_f32m2_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
+  return __riscv_vmfne_vf_f32m2_b16_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f32m4_b8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f32.nxv8f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f32.nxv8f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vv_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f32m4_b8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m4_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vf_f32m4_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
+  return __riscv_vmfne_vf_f32m4_b8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f32m8_b4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f32.nxv16f32.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f32.nxv16f32.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vv_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f32m8_b4_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], float noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f32.f32.i64( poison, [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv16f32.f32.i64( [[MASKEDOFF]], [[OP1]], float [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
-  return __riscv_vmfne_vf_f32m8_b4_m(mask, op1, op2, vl);
+vbool4_t test_vmfne_vf_f32m8_b4_m(vbool4_t mask, vbool4_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
+  return __riscv_vmfne_vf_f32m8_b4_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f64m1_b64_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv1f64.nxv1f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv1f64.nxv1f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vv_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f64m1_b64_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv1f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv1f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m1_b64_m(mask, op1, op2, vl);
+vbool64_t test_vmfne_vf_f64m1_b64_m(vbool64_t mask, vbool64_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
+  return __riscv_vmfne_vf_f64m1_b64_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f64m2_b32_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f64.nxv2f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f64.nxv2f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vv_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f64m2_b32_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv2f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m2_b32_m(mask, op1, op2, vl);
+vbool32_t test_vmfne_vf_f64m2_b32_m(vbool32_t mask, vbool32_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
+  return __riscv_vmfne_vf_f64m2_b32_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f64m4_b16_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f64.nxv4f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f64.nxv4f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vv_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f64m4_b16_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv4f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m4_b16_m(mask, op1, op2, vl);
+vbool16_t test_vmfne_vf_f64m4_b16_m(vbool16_t mask, vbool16_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
+  return __riscv_vmfne_vf_f64m4_b16_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vv_f64m8_b8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f64.nxv8f64.i64( poison, [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f64.nxv8f64.i64( [[MASKEDOFF]], [[OP1]], [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return __riscv_vmfne_vv_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vv_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
+  return __riscv_vmfne_vv_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }
 // CHECK-RV64-LABEL: define dso_local @test_vmfne_vf_f64m8_b8_m
-// CHECK-RV64-SAME: ( [[MASK:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ( [[MASK:%.*]], [[MASKEDOFF:%.*]], [[OP1:%.*]], double noundef [[OP2:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
 // CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f64.f64.i64( poison, [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = call @llvm.riscv.th.vmfne.mask.nxv8f64.f64.i64( [[MASKEDOFF]], [[OP1]], double [[OP2]], [[MASK]], i64 [[VL]])
 // CHECK-RV64-NEXT: ret [[TMP0]]
 //
-vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
-  return __riscv_vmfne_vf_f64m8_b8_m(mask, op1, op2, vl);
+vbool8_t test_vmfne_vf_f64m8_b8_m(vbool8_t mask, vbool8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
+  return __riscv_vmfne_vf_f64m8_b8_m(mask, maskedoff, op1, op2, vl);
 }