; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=ilp32d | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zfh,+zvfh \
; RUN:   -verify-machineinstrs -target-abi=lp64d | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vle_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vle_mask_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vle.mask.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vleff_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen* %4) nounwind {
; RV32-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV32-NEXT:    vle64ff.v v8, (a0), v0.t
; RV32-NEXT:    csrr a0, vl
; RV32-NEXT:    sw a0, 0(a2)
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vleff_mask_v_nxv1i64_nxv1i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vle64ff.v v8, (a0), v0.t
; RV64-NEXT:    csrr a0, vl
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    ret
entry:
  %a = call { <vscale x 1 x i64>, iXLen } @llvm.riscv.vleff.mask.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  %b = extractvalue { <vscale x 1 x i64>, iXLen } %a, 0
  %c = extractvalue { <vscale x 1 x i64>, iXLen } %a, 1
  store iXLen %c, iXLen* %4
  ret <vscale x 1 x i64> %b
}

declare <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(<vscale x 1 x i64>, <vscale x 1 x i64>*, iXLen, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vlse_mask_v_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vlse_mask_v_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vlse.mask.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>*, <vscale x 1 x iXLen>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x iXLen> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i8, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrsub_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vrsub.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrsub.mask.nxv1i8.i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}
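; Every call in this file passes a trailing policy operand of iXLen 1 (tail
; agnostic, mask undisturbed), which is why each autogenerated vsetvli reads
; "ta, mu". The vsub test below also hard-codes its scalar operand as i8 -9;
; isel is expected to fold the masked vsub into vadd.vi with the negated
; immediate, as the checks confirm.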
declare <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i8, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vsub_mask_vi_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadd.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsub.mask.nxv1i8.i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 -9, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf2_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf2_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf2 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %0, iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf4_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i16> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf4_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf4 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %0, iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i64> @intrinsic_vzext_mask_vf8_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i8> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vzext_mask_vf8_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vzext.vf8 v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vzext.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %0, iXLen %3, iXLen 1)
  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vand_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vand.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vand.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsll_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsll.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsll.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnsra_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vnsra.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnsra.mask.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmin_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmin.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vdiv_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vdiv.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmul_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmul.mask.nxv1i16.nxv1i8.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vsadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}
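; The fixed-point tests below (vaadd, vsmul, vssrl, vnclip) carry an extra
; vxrm operand ahead of the vl operand; passing iXLen 0 selects
; round-to-nearest-up and materializes as the "csrwi vxrm, 0" in the checks.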
declare <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vaadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vaadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vaadd.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen 0, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vsmul_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vsmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vsmul.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen 0, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vssrl_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vssrl.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vssrl.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen 0, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i16>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vnclip_mask_wv_nxv1i8_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    csrwi vxrm, 0
; CHECK-NEXT:    vnclip.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vnclip.mask.nxv1i8.nxv1i16.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen 0, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}
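; The floating-point tests below pass iXLen 7 for the rounding-mode operand,
; i.e. the dynamic frm, so no fsrmi write is expected before the instruction.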
declare <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen);

define <vscale x 1 x half> @intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfadd.mask.nxv1f16.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(<vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwadd_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwadd.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwadd.mask.nxv1f32.nxv1f16.nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmul_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmul.mask.nxv1f16.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfdiv_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfdiv.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfdiv.mask.nxv1f16.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(<vscale x 1 x half>, <vscale x 1 x half>, half, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfrdiv_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfrdiv.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrdiv.mask.nxv1f16.f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(<vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmul_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmul.mask.nxv1f32.nxv1f16.nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x float> %a
}
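; For the multiply-accumulate forms below (vfmacc, vfwmacc, like vmacc and
; vwmacc above), the first vector operand is both the accumulator and the
; destination, so v8 is a use as well as a def in the checked asm.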
declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(<vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwmacc.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen 7, iXLen %4, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfsqrt_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfsqrt.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsqrt.mask.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrsqrt7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfrsqrt7.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrsqrt7.mask.nxv1f16(<vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %0, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16(<vscale x 1 x i1> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfrec7_mask_v_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfrec7.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfrec7.mask.nxv1f16(<vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %0, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfmin_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfmin.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfmin.mask.nxv1f16.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(<vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfsgnj_mask_vv_nxv1f16_nxv1f16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfsgnj.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfsgnj.mask.nxv1f16.nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}
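; For vfclass below, the define is split across lines, so
; update_llc_test_checks.py places the autogenerated CHECK lines inside the
; signature; that placement is preserved here.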
declare <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(<vscale x 1 x i16>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vfclass_mask_v_nxv1i16_nxv1f16(
; CHECK-LABEL: intrinsic_vfclass_mask_v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfclass.v v8, v9, v0.t
; CHECK-NEXT:    ret
  <vscale x 1 x i16> %0,
  <vscale x 1 x half> %1,
  <vscale x 1 x i1> %2,
  iXLen %3) nounwind {
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfclass.mask.nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(<vscale x 1 x i16>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i16> @intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_xu.f.v_nxv1i16_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vfcvt.xu.f.v.mask.nxv1i16.nxv1f16(<vscale x 1 x i16> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(<vscale x 1 x half>, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfcvt_mask_f.x.v_nxv1f16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfcvt.f.x.v.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(<vscale x 1 x i32>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i32> @intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_xu.f.v_nxv1i32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwcvt.xu.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vfwcvt.xu.f.v.mask.nxv1i32.nxv1f16(<vscale x 1 x i32> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(<vscale x 1 x half>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.x.v_nxv1f16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vfwcvt.f.x.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfwcvt.f.x.v.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(<vscale x 1 x float>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x float> @intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfwcvt_mask_f.f.v_nxv1f32_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfwcvt.f.f.v v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfwcvt.f.f.v.mask.nxv1f32.nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3, iXLen 1)
  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(<vscale x 1 x i8>, <vscale x 1 x half>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_xu.f.w_nxv1i8_nxv1f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vfncvt.xu.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vfncvt.xu.f.w.mask.nxv1i8.nxv1f16(<vscale x 1 x i8> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(<vscale x 1 x half>, <vscale x 1 x i32>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.x.w_nxv1f16_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.f.x.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.x.w.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(<vscale x 1 x half>, <vscale x 1 x float>, <vscale x 1 x i1>, iXLen, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen %3) nounwind {
; CHECK-LABEL: intrinsic_vfncvt_mask_f.f.w_nxv1f16_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfncvt.f.f.w v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfncvt.f.f.w.mask.nxv1f16.nxv1f32(<vscale x 1 x half> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, iXLen 7, iXLen %3, iXLen 1)
  ret <vscale x 1 x half> %a
}
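; In the vslideup and vslide1up tests below the slide amount occupies a0, so
; the AVL moves to a1 and the vsetvli reads "zero, a1"; vfslide1up takes its
; scalar in fa0 instead, so its AVL stays in a0.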
declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, iXLen, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslideup.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, iXLen %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(<vscale x 1 x i8>, <vscale x 1 x i8>, i8, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vslide1up_mask_vx_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslide1up.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslide1up.mask.nxv1i8.i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(<vscale x 1 x half>, <vscale x 1 x half>, half, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, fa0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgather_mask_vv_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgather.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgather.vv.mask.nxv1i8.iXLen(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, <vscale x 1 x i16>, <vscale x 1 x i1>, iXLen, iXLen)

define <vscale x 1 x i8> @intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vrgatherei16_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vrgatherei16.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vrgatherei16.vv.mask.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, iXLen %4, iXLen 1)
  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_vid_mask_v_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vid_mask_v_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vid.v v8, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vid.mask.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2, iXLen 1)
  ret <vscale x 1 x i8> %a
}
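; The viota test below passes %1 as both the source mask operand and the
; control mask, so v0 appears twice in the checked asm: once as the operand
; and once as the v0.t predicate.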
declare <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, <vscale x 1 x i1>, iXLen, iXLen);

define <vscale x 1 x i8> @intrinsic_viota_mask_m_nxv1i8_nxv1i1(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_viota_mask_m_nxv1i8_nxv1i1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    viota.m v8, v0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.viota.mask.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, <vscale x 1 x i1> %1, iXLen %2, iXLen 1)
  ret <vscale x 1 x i8> %a
}