; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+m,+d,+zfh,+v,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s \
; RUN:   -check-prefixes=CHECK,CHECK-RV32
; RUN: llc -mtriple=riscv64 -mattr=+m,+d,+zfh,+v,+zvfh \
; RUN:   -verify-machineinstrs < %s | FileCheck %s \
; RUN:   -check-prefixes=CHECK,CHECK-RV64

declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr, i8, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(ptr %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i8_i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i8(ptr %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %load
}

declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr, i16, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(ptr %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i8_i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i16(ptr %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %load
}

declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr, i64, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64(ptr %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, ma
; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %load
}

define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64_allones_mask(ptr %ptr, i64 signext %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64_allones_mask:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    vsetvli zero, a3, e8, mf8, ta, ma
; CHECK-RV32-NEXT:    vlse8.v v8, (a0), a1
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64_allones_mask:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT:    vlse8.v v8, (a0), a1
; CHECK-RV64-NEXT:    ret
  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i64(ptr %ptr, i64 %stride, <vscale x 1 x i1> %b, i32 %evl)
  ret <vscale x 1 x i8> %load
}

declare <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x i8> @strided_vpload_nxv1i8(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i8> %load
}

define <vscale x 1 x i8> @strided_vpload_nxv1i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
  ret <vscale x 1 x i8> %load
}
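; The nxv1i8 tests above differ only in the integer type of the stride
; (i8, i16, i32, i64); all of them select the same vlse8.v. Note that on RV32
; an i64 stride occupies the a1/a2 register pair, so %evl arrives in a3
; rather than a2.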
declare <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x i8> @strided_vpload_nxv2i8(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i8> %load
}

declare <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x i8> @strided_vpload_nxv4i8(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i8> %load
}

declare <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x i8> @strided_vpload_nxv8i8(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %load
}
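; A constant stride equal to the element size in bytes is a unit-stride
; access, so the *_unit_stride tests below should select plain unit-stride
; loads (vle8.v, vle16.v, ...) rather than vlse*.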
define <vscale x 8 x i8> @strided_vpload_nxv8i8_unit_stride(ptr %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i8_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 1, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i8> %load
}

define <vscale x 8 x i8> @strided_vpload_nxv8i8_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i8_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
; CHECK-NEXT:    vlse8.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
  ret <vscale x 8 x i8> %load
}

declare <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x i16> @strided_vpload_nxv1i16(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i16> %load
}

declare <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x i16> @strided_vpload_nxv2i16(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i16> %load
}

define <vscale x 2 x i16> @strided_vpload_nxv2i16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
  ret <vscale x 2 x i16> %load
}

declare <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x i16> @strided_vpload_nxv4i16(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %load
}

define <vscale x 4 x i16> @strided_vpload_nxv4i16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i16_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i16> %load
}

declare <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x i16> @strided_vpload_nxv8i16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i16> %load
}

declare <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x i32> @strided_vpload_nxv1i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i32> %load
}

declare <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x i32> @strided_vpload_nxv2i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %load
}

define <vscale x 2 x i32> @strided_vpload_nxv2i32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i32_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i32> %load
}

declare <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x i32> @strided_vpload_nxv4i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i32> %load
}

define <vscale x 4 x i32> @strided_vpload_nxv4i32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
  %load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
  ret <vscale x 4 x i32> %load
}

declare <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)
define <vscale x 8 x i32> @strided_vpload_nxv8i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i32> %load
}

declare <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x i64> @strided_vpload_nxv1i64(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %load
}

define <vscale x 1 x i64> @strided_vpload_nxv1i64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i64_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x i64> %load
}

define <vscale x 1 x i64> @strided_vpload_nxv1i64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1i64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 1 x i1> %a, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %b, i32 %evl)
  ret <vscale x 1 x i64> %load
}

declare <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x i64> @strided_vpload_nxv2i64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x i64> %load
}

declare <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x i64> @strided_vpload_nxv4i64(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x i64> %load
}

declare <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x i64> @strided_vpload_nxv8i64(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x i64> %load
}

declare <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x half> @strided_vpload_nxv1f16(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x half> %load
}

declare <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)
define <vscale x 2 x half> @strided_vpload_nxv2f16(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x half> %load
}

define <vscale x 2 x half> @strided_vpload_nxv2f16_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2f16_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 2 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 2 x i1> %a, <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer
  %load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %b, i32 %evl)
  ret <vscale x 2 x half> %load
}

declare <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x half> @strided_vpload_nxv4f16(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %load
}

define <vscale x 4 x half> @strided_vpload_nxv4f16_unit_stride(ptr %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4f16_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vle16.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0.i32(ptr %ptr, i32 2, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x half> %load
}

declare <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x half> @strided_vpload_nxv8f16(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
; CHECK-NEXT:    vlse16.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x half> %load
}

declare <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x float> @strided_vpload_nxv1f32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x float> %load
}

declare <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x float> @strided_vpload_nxv2f32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %load
}

define <vscale x 2 x float> @strided_vpload_nxv2f32_unit_stride(ptr %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2f32_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT:    vle32.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0.i32(ptr %ptr, i32 4, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x float> %load
}

declare <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)
define <vscale x 4 x float> @strided_vpload_nxv4f32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x float> %load
}

declare <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x float> @strided_vpload_nxv8f32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x float> %load
}

define <vscale x 8 x float> @strided_vpload_nxv8f32_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8f32_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
; CHECK-NEXT:    vlse32.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 8 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 8 x i1> %a, <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer
  %load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %b, i32 %evl)
  ret <vscale x 8 x float> %load
}

declare <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr, i32, <vscale x 1 x i1>, i32)

define <vscale x 1 x double> @strided_vpload_nxv1f64(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %load
}

define <vscale x 1 x double> @strided_vpload_nxv1f64_unit_stride(ptr %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv1f64_unit_stride:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0), v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0.i32(ptr %ptr, i32 8, <vscale x 1 x i1> %m, i32 %evl)
  ret <vscale x 1 x double> %load
}

declare <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr, i32, <vscale x 2 x i1>, i32)

define <vscale x 2 x double> @strided_vpload_nxv2f64(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
  ret <vscale x 2 x double> %load
}

declare <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr, i32, <vscale x 4 x i1>, i32)

define <vscale x 4 x double> @strided_vpload_nxv4f64(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
  ret <vscale x 4 x double> %load
}

define <vscale x 4 x double> @strided_vpload_nxv4f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv4f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
  %a = insertelement <vscale x 4 x i1> poison, i1 true, i32 0
  %b = shufflevector <vscale x 4 x i1> %a, <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer
  %load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 4 x i1> %b, i32 %evl)
  ret <vscale x 4 x double> %load
}

declare <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr, i32, <vscale x 8 x i1>, i32)

define <vscale x 8 x double> @strided_vpload_nxv8f64(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %load = call <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0.i32(ptr %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
  ret <vscale x 8 x double> %load
}
; Widening
define <vscale x 3 x double> @strided_vpload_nxv3f64(ptr %ptr, i32 signext %stride, <vscale x 3 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv3f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %mask, i32 %evl)
  ret <vscale x 3 x double> %v
}

define <vscale x 3 x double> @strided_vpload_nxv3f64_allones_mask(ptr %ptr, i32 signext %stride, i32 zeroext %evl) {
; CHECK-LABEL: strided_vpload_nxv3f64_allones_mask:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; CHECK-NEXT:    vlse64.v v8, (a0), a1
; CHECK-NEXT:    ret
  %one = insertelement <vscale x 3 x i1> poison, i1 true, i32 0
  %allones = shufflevector <vscale x 3 x i1> %one, <vscale x 3 x i1> poison, <vscale x 3 x i32> zeroinitializer
  %v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr %ptr, i32 %stride, <vscale x 3 x i1> %allones, i32 %evl)
  ret <vscale x 3 x double> %v
}

declare <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0.i32(ptr, i32, <vscale x 3 x i1>, i32)

; Splitting
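; nxv16f64 needs two LMUL=8 halves, so the load is split: the high half's EVL
; is usubsat(%evl, vlenb), computed with the branchless sub/sltu/addi -1/and
; sequence, the low half's EVL is min(%evl, vlenb), and the high half's mask
; is carved out of v0 with vslidedown.vx.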
define <vscale x 16 x double> @strided_load_nxv16f64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_nxv16f64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    vmv1r.v v9, v0
; CHECK-RV32-NEXT:    csrr a4, vlenb
; CHECK-RV32-NEXT:    sub a2, a3, a4
; CHECK-RV32-NEXT:    sltu a5, a3, a2
; CHECK-RV32-NEXT:    addi a5, a5, -1
; CHECK-RV32-NEXT:    and a2, a5, a2
; CHECK-RV32-NEXT:    bltu a3, a4, .LBB49_2
; CHECK-RV32-NEXT:  # %bb.1:
; CHECK-RV32-NEXT:    mv a3, a4
; CHECK-RV32-NEXT:  .LBB49_2:
; CHECK-RV32-NEXT:    mul a5, a3, a1
; CHECK-RV32-NEXT:    add a5, a0, a5
; CHECK-RV32-NEXT:    srli a4, a4, 3
; CHECK-RV32-NEXT:    vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT:    vslidedown.vx v8, v9, a4
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vmv1r.v v0, v8
; CHECK-RV32-NEXT:    vlse64.v v16, (a5), a1, v0.t
; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vmv1r.v v0, v9
; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: strided_load_nxv16f64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    vmv1r.v v9, v0
; CHECK-RV64-NEXT:    csrr a4, vlenb
; CHECK-RV64-NEXT:    sub a3, a2, a4
; CHECK-RV64-NEXT:    sltu a5, a2, a3
; CHECK-RV64-NEXT:    addi a5, a5, -1
; CHECK-RV64-NEXT:    and a3, a5, a3
; CHECK-RV64-NEXT:    bltu a2, a4, .LBB49_2
; CHECK-RV64-NEXT:  # %bb.1:
; CHECK-RV64-NEXT:    mv a2, a4
; CHECK-RV64-NEXT:  .LBB49_2:
; CHECK-RV64-NEXT:    mul a5, a2, a1
; CHECK-RV64-NEXT:    add a5, a0, a5
; CHECK-RV64-NEXT:    srli a4, a4, 3
; CHECK-RV64-NEXT:    vsetvli a6, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT:    vslidedown.vx v8, v9, a4
; CHECK-RV64-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vmv1r.v v0, v8
; CHECK-RV64-NEXT:    vlse64.v v16, (a5), a1, v0.t
; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vmv1r.v v0, v9
; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT:    ret
  %v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x double> %v
}

define <vscale x 16 x double> @strided_load_nxv16f64_allones_mask(ptr %ptr, i64 %stride, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_load_nxv16f64_allones_mask:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    csrr a4, vlenb
; CHECK-RV32-NEXT:    sub a2, a3, a4
; CHECK-RV32-NEXT:    sltu a5, a3, a2
; CHECK-RV32-NEXT:    addi a5, a5, -1
; CHECK-RV32-NEXT:    and a2, a5, a2
; CHECK-RV32-NEXT:    bltu a3, a4, .LBB50_2
; CHECK-RV32-NEXT:  # %bb.1:
; CHECK-RV32-NEXT:    mv a3, a4
; CHECK-RV32-NEXT:  .LBB50_2:
; CHECK-RV32-NEXT:    mul a4, a3, a1
; CHECK-RV32-NEXT:    add a4, a0, a4
; CHECK-RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v16, (a4), a1
; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: strided_load_nxv16f64_allones_mask:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    csrr a4, vlenb
; CHECK-RV64-NEXT:    sub a3, a2, a4
; CHECK-RV64-NEXT:    sltu a5, a2, a3
; CHECK-RV64-NEXT:    addi a5, a5, -1
; CHECK-RV64-NEXT:    and a3, a5, a3
; CHECK-RV64-NEXT:    bltu a2, a4, .LBB50_2
; CHECK-RV64-NEXT:  # %bb.1:
; CHECK-RV64-NEXT:    mv a2, a4
; CHECK-RV64-NEXT:  .LBB50_2:
; CHECK-RV64-NEXT:    mul a4, a2, a1
; CHECK-RV64-NEXT:    add a4, a0, a4
; CHECK-RV64-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vlse64.v v16, (a4), a1
; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1
; CHECK-RV64-NEXT:    ret
  %one = insertelement <vscale x 16 x i1> poison, i1 true, i32 0
  %allones = shufflevector <vscale x 16 x i1> %one, <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 16 x i1> %allones, i32 %evl)
  ret <vscale x 16 x double> %v
}

declare <vscale x 16 x double> @llvm.experimental.vp.strided.load.nxv16f64.p0.i64(ptr, i64, <vscale x 16 x i1>, i32)

; Widening + splitting (with HiIsEmpty == true)
; NOTE: We can't return <vscale x 17 x double> as that introduces a vector
; store that can't yet be legalized through widening. In order to test purely
; the vp.strided.load legalization, we manually split it.
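; Expected lowering: three m8 strided loads, with %evl first clamped to
; 2 * vlenb for the low <vscale x 16 x double> part and the remainder driving
; the extra <vscale x 1 x double> part, which is written out through %hi_ptr
; via vs1r.v.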
define <vscale x 16 x double> @strided_load_nxv17f64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 zeroext %evl, <vscale x 1 x double>* %hi_ptr) {
; CHECK-RV32-LABEL: strided_load_nxv17f64:
; CHECK-RV32:       # %bb.0:
; CHECK-RV32-NEXT:    csrr a2, vlenb
; CHECK-RV32-NEXT:    slli a7, a2, 1
; CHECK-RV32-NEXT:    vmv1r.v v8, v0
; CHECK-RV32-NEXT:    mv a6, a3
; CHECK-RV32-NEXT:    bltu a3, a7, .LBB51_2
; CHECK-RV32-NEXT:  # %bb.1:
; CHECK-RV32-NEXT:    mv a6, a7
; CHECK-RV32-NEXT:  .LBB51_2:
; CHECK-RV32-NEXT:    sub a5, a6, a2
; CHECK-RV32-NEXT:    sltu t0, a6, a5
; CHECK-RV32-NEXT:    addi t0, t0, -1
; CHECK-RV32-NEXT:    and t0, t0, a5
; CHECK-RV32-NEXT:    mv a5, a6
; CHECK-RV32-NEXT:    bltu a6, a2, .LBB51_4
; CHECK-RV32-NEXT:  # %bb.3:
; CHECK-RV32-NEXT:    mv a5, a2
; CHECK-RV32-NEXT:  .LBB51_4:
; CHECK-RV32-NEXT:    mul t1, a5, a1
; CHECK-RV32-NEXT:    add t1, a0, t1
; CHECK-RV32-NEXT:    srli t2, a2, 3
; CHECK-RV32-NEXT:    vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV32-NEXT:    vslidedown.vx v0, v8, t2
; CHECK-RV32-NEXT:    vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v16, (t1), a1, v0.t
; CHECK-RV32-NEXT:    sub a7, a3, a7
; CHECK-RV32-NEXT:    sltu a3, a3, a7
; CHECK-RV32-NEXT:    addi a3, a3, -1
; CHECK-RV32-NEXT:    and a3, a3, a7
; CHECK-RV32-NEXT:    bltu a3, a2, .LBB51_6
; CHECK-RV32-NEXT:  # %bb.5:
; CHECK-RV32-NEXT:    mv a3, a2
; CHECK-RV32-NEXT:  .LBB51_6:
; CHECK-RV32-NEXT:    mul a6, a6, a1
; CHECK-RV32-NEXT:    add a6, a0, a6
; CHECK-RV32-NEXT:    srli a2, a2, 2
; CHECK-RV32-NEXT:    vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV32-NEXT:    vslidedown.vx v0, v8, a2
; CHECK-RV32-NEXT:    vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vlse64.v v24, (a6), a1, v0.t
; CHECK-RV32-NEXT:    vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT:    vmv1r.v v0, v8
; CHECK-RV32-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT:    vs1r.v v24, (a4)
; CHECK-RV32-NEXT:    ret
;
; CHECK-RV64-LABEL: strided_load_nxv17f64:
; CHECK-RV64:       # %bb.0:
; CHECK-RV64-NEXT:    csrr a4, vlenb
; CHECK-RV64-NEXT:    slli a7, a4, 1
; CHECK-RV64-NEXT:    vmv1r.v v8, v0
; CHECK-RV64-NEXT:    mv a6, a2
; CHECK-RV64-NEXT:    bltu a2, a7, .LBB51_2
; CHECK-RV64-NEXT:  # %bb.1:
; CHECK-RV64-NEXT:    mv a6, a7
; CHECK-RV64-NEXT:  .LBB51_2:
; CHECK-RV64-NEXT:    sub a5, a6, a4
; CHECK-RV64-NEXT:    sltu t0, a6, a5
; CHECK-RV64-NEXT:    addi t0, t0, -1
; CHECK-RV64-NEXT:    and t0, t0, a5
; CHECK-RV64-NEXT:    mv a5, a6
; CHECK-RV64-NEXT:    bltu a6, a4, .LBB51_4
; CHECK-RV64-NEXT:  # %bb.3:
; CHECK-RV64-NEXT:    mv a5, a4
; CHECK-RV64-NEXT:  .LBB51_4:
; CHECK-RV64-NEXT:    mul t1, a5, a1
; CHECK-RV64-NEXT:    add t1, a0, t1
; CHECK-RV64-NEXT:    srli t2, a4, 3
; CHECK-RV64-NEXT:    vsetvli t3, zero, e8, mf4, ta, ma
; CHECK-RV64-NEXT:    vslidedown.vx v0, v8, t2
; CHECK-RV64-NEXT:    vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vlse64.v v16, (t1), a1, v0.t
; CHECK-RV64-NEXT:    sub a7, a2, a7
; CHECK-RV64-NEXT:    sltu a2, a2, a7
; CHECK-RV64-NEXT:    addi a2, a2, -1
; CHECK-RV64-NEXT:    and a2, a2, a7
; CHECK-RV64-NEXT:    bltu a2, a4, .LBB51_6
; CHECK-RV64-NEXT:  # %bb.5:
; CHECK-RV64-NEXT:    mv a2, a4
; CHECK-RV64-NEXT:  .LBB51_6:
; CHECK-RV64-NEXT:    mul a6, a6, a1
; CHECK-RV64-NEXT:    add a6, a0, a6
; CHECK-RV64-NEXT:    srli a4, a4, 2
; CHECK-RV64-NEXT:    vsetvli a7, zero, e8, mf2, ta, ma
; CHECK-RV64-NEXT:    vslidedown.vx v0, v8, a4
; CHECK-RV64-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vlse64.v v24, (a6), a1, v0.t
; CHECK-RV64-NEXT:    vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT:    vmv1r.v v0, v8
; CHECK-RV64-NEXT:    vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT:    vs1r.v v24, (a3)
; CHECK-RV64-NEXT:    ret
  %v = call <vscale x 17 x double> @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr %ptr, i64 %stride, <vscale x 17 x i1> %mask, i32 %evl)
  %lo = call <vscale x 16 x double> @llvm.experimental.vector.extract.nxv16f64(<vscale x 17 x double> %v, i64 0)
  %hi = call <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64(<vscale x 17 x double> %v, i64 16)
  store <vscale x 1 x double> %hi, <vscale x 1 x double>* %hi_ptr
  ret <vscale x 16 x double> %lo
}

declare <vscale x 17 x double> @llvm.experimental.vp.strided.load.nxv17f64.p0.i64(ptr, i64, <vscale x 17 x i1>, i32)
declare <vscale x 1 x double> @llvm.experimental.vector.extract.nxv1f64(<vscale x 17 x double> %vec, i64 %idx)
declare <vscale x 16 x double> @llvm.experimental.vector.extract.nxv16f64(<vscale x 17 x double> %vec, i64 %idx)