; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64V
; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvl128b -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64,RV64ZVE32

define void @buildvec_vid_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> <i8 0, i8 1, i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr %x
  ret void
}

define void @buildvec_vid_undefelts_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_undefelts_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> <i8 0, i8 1, i8 2, i8 undef, i8 4, i8 undef, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr %x
  ret void
}

; TODO: Could do VID then insertelement on missing elements
define void @buildvec_notquite_vid_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_notquite_vid_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI2_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI2_0)
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> <i8 0, i8 1, i8 3, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15>, ptr %x
  ret void
}

define void @buildvec_vid_plus_imm_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_plus_imm_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vi v8, v8, 2
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> <i8 2, i8 3, i8 4, i8 5, i8 6, i8 7, i8 8, i8 9, i8 10, i8 11, i8 12, i8 13, i8 14, i8 15, i8 16, i8 17>, ptr %x
  ret void
}

define void @buildvec_vid_plus_nonimm_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_plus_nonimm_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a1, %hi(.LCPI4_0)
; CHECK-NEXT:    addi a1, a1, %lo(.LCPI4_0)
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vle8.v v8, (a1)
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> <i8 100, i8 101, i8 102, i8 103, i8 104, i8 105, i8 106, i8 107, i8 108, i8 109, i8 110, i8 111, i8 112, i8 113, i8 114, i8 115>, ptr %x
  ret void
}

define void @buildvec_vid_mpy_imm_v16i8(ptr %x) {
; CHECK-LABEL: buildvec_vid_mpy_imm_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    li a1, 3
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <16 x i8> <i8 0, i8 3, i8 6, i8 9, i8 12, i8 15, i8 18, i8 21, i8 24, i8 27, i8 30, i8 33, i8 36, i8 39, i8 42, i8 45>, ptr %x
  ret void
}

define <4 x i8> @buildvec_vid_step2_add0_v4i8() {
; CHECK-LABEL: buildvec_vid_step2_add0_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 2, i8 4, i8 6>
}

define <4 x i8> @buildvec_vid_step2_add0_v4i8_undef0() {
; CHECK-LABEL: buildvec_vid_step2_add0_v4i8_undef0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 undef, i8 2, i8 4, i8 6>
}

define <4 x i8> @buildvec_vid_step2_add0_v4i8_undef1() {
; CHECK-LABEL: buildvec_vid_step2_add0_v4i8_undef1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 undef, i8 4, i8 6>
}

define <4 x i8> @buildvec_vid_step2_add0_v4i8_undef2() {
; CHECK-LABEL: buildvec_vid_step2_add0_v4i8_undef2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 2, i8 undef, i8 6>
}
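
; Step-2 sequences are matched as vid.v followed by a self vadd.vv; the +1
; addend below additionally folds into a trailing vadd.vi.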

define <4 x i8> @buildvec_vid_step2_add1_v4i8() {
; CHECK-LABEL: buildvec_vid_step2_add1_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 1, i8 3, i8 5, i8 7>
}

define <4 x i8> @buildvec_vid_step2_add1_v4i8_undef0() {
; CHECK-LABEL: buildvec_vid_step2_add1_v4i8_undef0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 undef, i8 3, i8 5, i8 7>
}

define <4 x i8> @buildvec_vid_step2_add1_v4i8_undef1() {
; CHECK-LABEL: buildvec_vid_step2_add1_v4i8_undef1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 1, i8 undef, i8 5, i8 7>
}

define <4 x i8> @buildvec_vid_step2_add1_v4i8_undef2() {
; CHECK-LABEL: buildvec_vid_step2_add1_v4i8_undef2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vadd.vi v8, v8, 1
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 1, i8 3, i8 undef, i8 7>
}

define <4 x i8> @buildvec_vid_stepn1_add0_v4i8() {
; CHECK-LABEL: buildvec_vid_stepn1_add0_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 -1, i8 -2, i8 -3>
}

define <4 x i8> @buildvec_vid_stepn1_add0_v4i8_undef0() {
; CHECK-LABEL: buildvec_vid_stepn1_add0_v4i8_undef0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 undef, i8 -1, i8 -2, i8 -3>
}

define <4 x i8> @buildvec_vid_stepn1_add0_v4i8_undef1() {
; CHECK-LABEL: buildvec_vid_stepn1_add0_v4i8_undef1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 undef, i8 -2, i8 -3>
}

define <4 x i8> @buildvec_vid_stepn1_add0_v4i8_undef2() {
; CHECK-LABEL: buildvec_vid_stepn1_add0_v4i8_undef2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 -1, i8 undef, i8 -3>
}

define <4 x i8> @buildvec_vid_stepn2_add0_v4i8() {
; CHECK-LABEL: buildvec_vid_stepn2_add0_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 -2, i8 -4, i8 -6>
}

define <4 x i8> @buildvec_vid_stepn2_add0_v4i8_undef0() {
; CHECK-LABEL: buildvec_vid_stepn2_add0_v4i8_undef0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 undef, i8 -2, i8 -4, i8 -6>
}

define <4 x i8> @buildvec_vid_stepn2_add0_v4i8_undef1() {
; CHECK-LABEL: buildvec_vid_stepn2_add0_v4i8_undef1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vrsub.vi v8, v8, 0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 undef, i8 -4, i8 -6>
}

define <4 x i8> @buildvec_vid_stepn2_add0_v4i8_undef2() {
; CHECK-LABEL: buildvec_vid_stepn2_add0_v4i8_undef2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, -6
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 undef, i8 undef, i8 undef, i8 -6>
}

define <4 x i8> @buildvec_vid_stepn2_add3_v4i8() {
; CHECK-LABEL: buildvec_vid_stepn2_add3_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vadd.vv v8, v8, v8
; CHECK-NEXT:    vrsub.vi v8, v8, 3
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 3, i8 1, i8 -1, i8 -3>
}
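
; A step of -3 is materialized as 253 (-3 as an unsigned 8-bit immediate) and
; folded into vmadd.vx together with the addend.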

define <4 x i8> @buildvec_vid_stepn3_add3_v4i8() {
; CHECK-LABEL: buildvec_vid_stepn3_add3_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 3
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    li a0, 253
; CHECK-NEXT:    vmadd.vx v8, a0, v9
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 3, i8 0, i8 -3, i8 -6>
}

define void @buildvec_vid_stepn3_addn3_v4i32(ptr %z0, ptr %z1, ptr %z2, ptr %z3) {
; CHECK-LABEL: buildvec_vid_stepn3_addn3_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, -3
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    li a4, -3
; CHECK-NEXT:    vmadd.vx v9, a4, v8
; CHECK-NEXT:    vse32.v v9, (a0)
; CHECK-NEXT:    vse32.v v9, (a1)
; CHECK-NEXT:    vse32.v v9, (a2)
; CHECK-NEXT:    vse32.v v9, (a3)
; CHECK-NEXT:    ret
  store <4 x i32> <i32 -3, i32 -6, i32 -9, i32 -12>, ptr %z0
  store <4 x i32> <i32 undef, i32 -6, i32 -9, i32 -12>, ptr %z1
  store <4 x i32> <i32 undef, i32 undef, i32 -9, i32 -12>, ptr %z2
  store <4 x i32> <i32 -3, i32 undef, i32 -9, i32 -12>, ptr %z3
  ret void
}

; FIXME: RV32 doesn't catch this pattern due to BUILD_VECTOR legalization.
define <4 x i64> @buildvec_vid_step1_add0_v4i64() {
; RV32-LABEL: buildvec_vid_step1_add0_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI25_0)
; RV32-NEXT:    addi a0, a0, %lo(.LCPI25_0)
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vle8.v v10, (a0)
; RV32-NEXT:    vsext.vf4 v8, v10
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_vid_step1_add0_v4i64:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT:    vid.v v8
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_vid_step1_add0_v4i64:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    li a1, 3
; RV64ZVE32-NEXT:    sd a1, 24(a0)
; RV64ZVE32-NEXT:    li a1, 2
; RV64ZVE32-NEXT:    sd a1, 16(a0)
; RV64ZVE32-NEXT:    li a1, 1
; RV64ZVE32-NEXT:    sd a1, 8(a0)
; RV64ZVE32-NEXT:    sd zero, 0(a0)
; RV64ZVE32-NEXT:    ret
  ret <4 x i64> <i64 0, i64 1, i64 2, i64 3>
}

define <4 x i64> @buildvec_vid_step2_add0_v4i64() {
; RV32-LABEL: buildvec_vid_step2_add0_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI26_0)
; RV32-NEXT:    addi a0, a0, %lo(.LCPI26_0)
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vle8.v v10, (a0)
; RV32-NEXT:    vsext.vf4 v8, v10
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_vid_step2_add0_v4i64:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT:    vid.v v8
; RV64V-NEXT:    vadd.vv v8, v8, v8
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_vid_step2_add0_v4i64:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    li a1, 6
; RV64ZVE32-NEXT:    sd a1, 24(a0)
; RV64ZVE32-NEXT:    li a1, 4
; RV64ZVE32-NEXT:    sd a1, 16(a0)
; RV64ZVE32-NEXT:    li a1, 2
; RV64ZVE32-NEXT:    sd a1, 8(a0)
; RV64ZVE32-NEXT:    sd zero, 0(a0)
; RV64ZVE32-NEXT:    ret
  ret <4 x i64> <i64 0, i64 2, i64 4, i64 6>
}
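
; The sequences below are close to vid but not affine, so they are
; materialized as packed 32-bit constants instead of computed with vid.v.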

define <4 x i8> @buildvec_no_vid_v4i8_0() {
; CHECK-LABEL: buildvec_no_vid_v4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 28768
; CHECK-NEXT:    addi a0, a0, 769
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 1, i8 3, i8 6, i8 7>
}

define <4 x i8> @buildvec_no_vid_v4i8_1() {
; CHECK-LABEL: buildvec_no_vid_v4i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 28752
; CHECK-NEXT:    addi a0, a0, 512
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 2, i8 5, i8 7>
}

define <4 x i8> @buildvec_no_vid_v4i8_2() {
; CHECK-LABEL: buildvec_no_vid_v4i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 32768
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 0, i8 0, i8 8>
}

define <4 x i8> @buildvec_no_vid_v4i8_3() {
; CHECK-LABEL: buildvec_no_vid_v4i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 28672
; CHECK-NEXT:    addi a0, a0, 255
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 -1, i8 0, i8 0, i8 7>
}

define <4 x i8> @buildvec_no_vid_v4i8_4() {
; CHECK-LABEL: buildvec_no_vid_v4i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, -2
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 -2, i8 -2, i8 -2, i8 -2>
}

define <4 x i8> @buildvec_no_vid_v4i8_5() {
; CHECK-LABEL: buildvec_no_vid_v4i8_5:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 1032144
; CHECK-NEXT:    addi a0, a0, -257
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 -1, i8 -2, i8 -4, i8 -5>
}

define void @buildvec_dominant0_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_dominant0_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, zero
; CHECK-NEXT:    vmv.v.i v9, 8
; CHECK-NEXT:    vsetivli zero, 4, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 3
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vse16.v v9, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> <i16 8, i16 8, i16 8, i16 0, i16 8, i16 8, i16 8, i16 8>, ptr %x
  ret void
}

define void @buildvec_dominant1_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_dominant1_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 8
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> <i16 8, i16 8, i16 8, i16 undef, i16 8, i16 8, i16 8, i16 8>, ptr %x
  ret void
}

define <2 x i8> @buildvec_dominant0_v2i8() {
; CHECK-LABEL: buildvec_dominant0_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  ret <2 x i8> <i8 undef, i8 undef>
}

define <2 x i8> @buildvec_dominant1_v2i8() {
; RV32-LABEL: buildvec_dominant1_v2i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; RV32-NEXT:    vmv.v.i v8, -1
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_dominant1_v2i8:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; RV64V-NEXT:    vmv.v.i v8, -1
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_dominant1_v2i8:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32-NEXT:    vmv.v.i v8, -1
; RV64ZVE32-NEXT:    ret
  ret <2 x i8> <i8 -1, i8 undef>
}

define <2 x i8> @buildvec_dominant2_v2i8() {
; RV32-LABEL: buildvec_dominant2_v2i8:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vrsub.vi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_dominant2_v2i8:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
; RV64V-NEXT:    vid.v v8
; RV64V-NEXT:    vrsub.vi v8, v8, 0
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_dominant2_v2i8:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; RV64ZVE32-NEXT:    vid.v v8
; RV64ZVE32-NEXT:    vrsub.vi v8, v8, 0
; RV64ZVE32-NEXT:    ret
  ret <2 x i8> <i8 0, i8 -1>
}

define void @buildvec_dominant0_v2i32(ptr %x) {
; RV32-LABEL: buildvec_dominant0_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI38_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI38_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vle32.v v8, (a1)
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_dominant0_v2i32:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a1, %hi(.LCPI38_0)
; RV64V-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.i v8, -1
; RV64V-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
; RV64V-NEXT:    vmv.s.x v8, a1
; RV64V-NEXT:    vse64.v v8, (a0)
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_dominant0_v2i32:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI38_0)
; RV64ZVE32-NEXT:    ld a1, %lo(.LCPI38_0)(a1)
; RV64ZVE32-NEXT:    li a2, -1
; RV64ZVE32-NEXT:    sd a2, 8(a0)
; RV64ZVE32-NEXT:    sd a1, 0(a0)
; RV64ZVE32-NEXT:    ret
  store <2 x i64> , ptr %x
  ret void
}
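
; At optsize, the same vector is loaded whole from the constant pool rather
; than materialized with vmv.v.i plus vmv.s.x.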

define void @buildvec_dominant1_optsize_v2i32(ptr %x) optsize {
; RV32-LABEL: buildvec_dominant1_optsize_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI39_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI39_0)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vle32.v v8, (a1)
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_dominant1_optsize_v2i32:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a1, %hi(.LCPI39_0)
; RV64V-NEXT:    addi a1, a1, %lo(.LCPI39_0)
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vle64.v v8, (a1)
; RV64V-NEXT:    vse64.v v8, (a0)
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_dominant1_optsize_v2i32:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI39_0)
; RV64ZVE32-NEXT:    ld a1, %lo(.LCPI39_0)(a1)
; RV64ZVE32-NEXT:    li a2, -1
; RV64ZVE32-NEXT:    sd a2, 8(a0)
; RV64ZVE32-NEXT:    sd a1, 0(a0)
; RV64ZVE32-NEXT:    ret
  store <2 x i64> , ptr %x
  ret void
}

define void @buildvec_seq_v8i8_v4i16(ptr %x) {
; CHECK-LABEL: buildvec_seq_v8i8_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 513
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i8> <i8 1, i8 2, i8 1, i8 2, i8 1, i8 2, i8 1, i8 2>, ptr %x
  ret void
}

define void @buildvec_seq_v8i8_v2i32(ptr %x) {
; RV32-LABEL: buildvec_seq_v8i8_v2i32:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 48
; RV32-NEXT:    addi a1, a1, 513
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a1
; RV32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_seq_v8i8_v2i32:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a1, 48
; RV64V-NEXT:    addi a1, a1, 513
; RV64V-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a1
; RV64V-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64V-NEXT:    vse8.v v8, (a0)
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_seq_v8i8_v2i32:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    lui a1, 48
; RV64ZVE32-NEXT:    addi a1, a1, 513
; RV64ZVE32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV64ZVE32-NEXT:    vmv.v.x v8, a1
; RV64ZVE32-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; RV64ZVE32-NEXT:    vse8.v v8, (a0)
; RV64ZVE32-NEXT:    ret
  store <8 x i8> <i8 1, i8 2, i8 3, i8 undef, i8 1, i8 2, i8 3, i8 undef>, ptr %x
  ret void
}

define void @buildvec_seq_v16i8_v2i64(ptr %x) {
; RV32-LABEL: buildvec_seq_v16i8_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, %hi(.LCPI42_0)
; RV32-NEXT:    addi a1, a1, %lo(.LCPI42_0)
; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT:    vle8.v v8, (a1)
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_seq_v16i8_v2i64:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a1, %hi(.LCPI42_0)
; RV64V-NEXT:    addi a1, a1, %lo(.LCPI42_0)
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vlse64.v v8, (a1), zero
; RV64V-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV64V-NEXT:    vse8.v v8, (a0)
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_seq_v16i8_v2i64:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI42_0)
; RV64ZVE32-NEXT:    addi a1, a1, %lo(.LCPI42_0)
; RV64ZVE32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32-NEXT:    vle8.v v8, (a1)
; RV64ZVE32-NEXT:    vse8.v v8, (a0)
; RV64ZVE32-NEXT:    ret
  store <16 x i8> , ptr %x
  ret void
}
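
; This repeating i64 pattern has a sign-extended 32-bit image, so it can be
; splat with vmv.v.x at e64; zve32x lacks e64 and falls back to the constant
; pool.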

define void @buildvec_seq2_v16i8_v2i64(ptr %x) {
; RV32-LABEL: buildvec_seq2_v16i8_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a1, 528432
; RV32-NEXT:    addi a1, a1, 513
; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a1
; RV32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV32-NEXT:    vse8.v v8, (a0)
; RV32-NEXT:    ret
;
; RV64V-LABEL: buildvec_seq2_v16i8_v2i64:
; RV64V:       # %bb.0:
; RV64V-NEXT:    lui a1, 528432
; RV64V-NEXT:    addiw a1, a1, 513
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a1
; RV64V-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV64V-NEXT:    vse8.v v8, (a0)
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: buildvec_seq2_v16i8_v2i64:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    lui a1, %hi(.LCPI43_0)
; RV64ZVE32-NEXT:    addi a1, a1, %lo(.LCPI43_0)
; RV64ZVE32-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; RV64ZVE32-NEXT:    vle8.v v8, (a1)
; RV64ZVE32-NEXT:    vse8.v v8, (a0)
; RV64ZVE32-NEXT:    ret
  store <16 x i8> <i8 1, i8 2, i8 3, i8 -127, i8 -1, i8 -1, i8 -1, i8 -1, i8 1, i8 2, i8 3, i8 -127, i8 -1, i8 -1, i8 -1, i8 -1>, ptr %x
  ret void
}

define void @buildvec_seq_v9i8(ptr %x) {
; CHECK-LABEL: buildvec_seq_v9i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 73
; CHECK-NEXT:    vsetivli zero, 1, e16, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v0, a1
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 3
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    li a1, 146
; CHECK-NEXT:    vsetvli zero, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.s.x v0, a1
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmerge.vim v8, v8, 2, v0
; CHECK-NEXT:    vsetivli zero, 9, e8, m1, ta, ma
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <9 x i8> <i8 1, i8 2, i8 3, i8 1, i8 2, i8 3, i8 1, i8 2, i8 3>, ptr %x
  ret void
}

define void @buildvec_seq_v4i16_v2i32(ptr %x) {
; CHECK-LABEL: buildvec_seq_v4i16_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, -127
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.v.x v8, a1
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <4 x i16> <i16 -127, i16 -1, i16 -127, i16 -1>, ptr %x
  ret void
}

define void @buildvec_vid_step1o2_v4i32(ptr %z0, ptr %z1, ptr %z2, ptr %z3, ptr %z4, ptr %z5, ptr %z6) {
; RV32-LABEL: buildvec_vid_step1o2_v4i32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vid.v v8
; RV32-NEXT:    vsrl.vi v8, v8, 1
; RV32-NEXT:    vse32.v v8, (a0)
; RV32-NEXT:    vse32.v v8, (a1)
; RV32-NEXT:    vmv.v.i v9, 1
; RV32-NEXT:    vse32.v v8, (a2)
; RV32-NEXT:    vse32.v v8, (a3)
; RV32-NEXT:    vse32.v v8, (a4)
; RV32-NEXT:    vmv.s.x v8, zero
; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; RV32-NEXT:    vslideup.vi v9, v8, 1
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vse32.v v9, (a5)
; RV32-NEXT:    vmv.v.i v8, 0
; RV32-NEXT:    li a0, 1
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vse32.v v8, (a6)
; RV32-NEXT:    ret
;
; RV64-LABEL: buildvec_vid_step1o2_v4i32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vid.v v8
; RV64-NEXT:    vsrl.vi v8, v8, 1
; RV64-NEXT:    vse32.v v8, (a0)
; RV64-NEXT:    vmv.v.i v9, 1
; RV64-NEXT:    vse32.v v8, (a1)
; RV64-NEXT:    vse32.v v8, (a2)
; RV64-NEXT:    vse32.v v8, (a3)
; RV64-NEXT:    vse32.v v8, (a4)
; RV64-NEXT:    vmv.s.x v8, zero
; RV64-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; RV64-NEXT:    vslideup.vi v9, v8, 1
; RV64-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-NEXT:    vse32.v v9, (a5)
; RV64-NEXT:    vmv.v.i v8, 0
; RV64-NEXT:    li a0, 1
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    vse32.v v8, (a6)
; RV64-NEXT:    ret
  store <4 x i32> <i32 0, i32 0, i32 1, i32 1>, ptr %z0
  store <4 x i32> <i32 0, i32 0, i32 1, i32 undef>, ptr %z1
  store <4 x i32> <i32 0, i32 undef, i32 1, i32 1>, ptr %z2
  store <4 x i32> <i32 undef, i32 0, i32 undef, i32 1>, ptr %z3
  store <4 x i32> <i32 0, i32 undef, i32 1, i32 undef>, ptr %z4
; We don't catch this one
  store <4 x i32> <i32 undef, i32 0, i32 1, i32 1>, ptr %z5
; We catch this one but as VID/3 rather than VID/2
  store <4 x i32> <i32 0, i32 0, i32 undef, i32 1>, ptr %z6
  ret void
}
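
; The same 1/2-step patterns as above, with a +3 addend, at e16.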

define void @buildvec_vid_step1o2_add3_v4i16(ptr %z0, ptr %z1, ptr %z2, ptr %z3, ptr %z4, ptr %z5, ptr %z6) {
; CHECK-LABEL: buildvec_vid_step1o2_add3_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsrl.vi v8, v8, 1
; CHECK-NEXT:    vadd.vi v8, v8, 3
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    vmv.v.i v9, 3
; CHECK-NEXT:    vse16.v v8, (a1)
; CHECK-NEXT:    vse16.v v8, (a2)
; CHECK-NEXT:    vse16.v v8, (a3)
; CHECK-NEXT:    vse16.v v8, (a4)
; CHECK-NEXT:    vmv.v.i v8, 4
; CHECK-NEXT:    vsetivli zero, 2, e16, mf2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 1
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vse16.v v8, (a5)
; CHECK-NEXT:    li a0, 4
; CHECK-NEXT:    vslide1down.vx v8, v9, a0
; CHECK-NEXT:    vse16.v v8, (a6)
; CHECK-NEXT:    ret
  store <4 x i16> <i16 3, i16 3, i16 4, i16 4>, ptr %z0
  store <4 x i16> <i16 3, i16 3, i16 4, i16 undef>, ptr %z1
  store <4 x i16> <i16 3, i16 undef, i16 4, i16 4>, ptr %z2
  store <4 x i16> <i16 undef, i16 3, i16 undef, i16 4>, ptr %z3
  store <4 x i16> <i16 3, i16 undef, i16 4, i16 undef>, ptr %z4
; We don't catch this one
  store <4 x i16> <i16 undef, i16 3, i16 4, i16 4>, ptr %z5
; We catch this one but as VID/3 rather than VID/2
  store <4 x i16> <i16 3, i16 3, i16 undef, i16 4>, ptr %z6
  ret void
}

define void @buildvec_vid_stepn1o4_addn5_v8i8(ptr %z0) {
; CHECK-LABEL: buildvec_vid_stepn1o4_addn5_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vrsub.vi v8, v8, -5
; CHECK-NEXT:    vse8.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i8> <i8 -5, i8 -5, i8 -5, i8 -5, i8 -6, i8 -6, i8 -6, i8 -6>, ptr %z0
  ret void
}

define void @buildvec_vid_mpy_imm_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_vid_mpy_imm_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    li a1, 17
; CHECK-NEXT:    vmul.vx v8, v8, a1
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> <i16 0, i16 17, i16 34, i16 51, i16 68, i16 85, i16 102, i16 119>, ptr %x
  ret void
}

define void @buildvec_vid_shl_imm_v8i16(ptr %x) {
; CHECK-LABEL: buildvec_vid_shl_imm_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vid.v v8
; CHECK-NEXT:    vsll.vi v8, v8, 9
; CHECK-NEXT:    vse16.v v8, (a0)
; CHECK-NEXT:    ret
  store <8 x i16> <i16 0, i16 512, i16 1024, i16 1536, i16 2048, i16 2560, i16 3072, i16 3584>, ptr %x
  ret void
}

define <4 x i32> @splat_c3_v4i32(<4 x i32> %v) {
; CHECK-LABEL: splat_c3_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 3
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x i32> %v, i32 3
  %ins = insertelement <4 x i32> poison, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}

define <4 x i32> @splat_idx_v4i32(<4 x i32> %v, i64 %idx) {
; CHECK-LABEL: splat_idx_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <4 x i32> %v, i64 %idx
  %ins = insertelement <4 x i32> poison, i32 %x, i32 0
  %splat = shufflevector <4 x i32> %ins, <4 x i32> poison, <4 x i32> zeroinitializer
  ret <4 x i32> %splat
}

define <8 x i16> @splat_c4_v8i16(<8 x i16> %v) {
; CHECK-LABEL: splat_c4_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vi v9, v8, 4
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <8 x i16> %v, i32 4
  %ins = insertelement <8 x i16> poison, i16 %x, i32 0
  %splat = shufflevector <8 x i16> %ins, <8 x i16> poison, <8 x i32> zeroinitializer
  ret <8 x i16> %splat
}

define <8 x i16> @splat_idx_v8i16(<8 x i16> %v, i64 %idx) {
; CHECK-LABEL: splat_idx_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vrgather.vx v9, v8, a0
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %x = extractelement <8 x i16> %v, i64 %idx
  %ins = insertelement <8 x i16> poison, i16 %x, i32 0
  %splat = shufflevector <8 x i16> %ins, <8 x i16> poison, <8 x i32> zeroinitializer
  ret <8 x i16> %splat
}
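
; The remaining sequences are close to vid but differ in at least one defined
; element, so they are materialized as packed constants instead.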

define <4 x i8> @buildvec_not_vid_v4i8_1() {
; CHECK-LABEL: buildvec_not_vid_v4i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 12320
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 0, i8 0, i8 2, i8 3>
}

define <4 x i8> @buildvec_not_vid_v4i8_2() {
; CHECK-LABEL: buildvec_not_vid_v4i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    lui a0, 16
; CHECK-NEXT:    addi a0, a0, 771
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  ret <4 x i8> <i8 3, i8 3, i8 1, i8 0>
}

; We match this as a VID sequence (-3 / 8) + 5 but choose not to introduce
; division to compute it.
define <16 x i8> @buildvec_not_vid_v16i8() {
; CHECK-LABEL: buildvec_not_vid_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 3
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vsetivli zero, 7, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v9, 6
; CHECK-NEXT:    ret
  ret <16 x i8> <i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 3, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 undef, i8 0, i8 undef>
}

define <8 x i32> @prefix_overwrite(<8 x i32> %vin, i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: prefix_overwrite:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    vmv.s.x v10, a1
; CHECK-NEXT:    vslideup.vi v8, v10, 1
; CHECK-NEXT:    vmv.s.x v10, a2
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 2
; CHECK-NEXT:    vmv.s.x v10, a3
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 3
; CHECK-NEXT:    ret
  %v0 = insertelement <8 x i32> %vin, i32 %a, i32 0
  %v1 = insertelement <8 x i32> %v0, i32 %b, i32 1
  %v2 = insertelement <8 x i32> %v1, i32 %c, i32 2
  %v3 = insertelement <8 x i32> %v2, i32 %d, i32 3
  ret <8 x i32> %v3
}

define <8 x i32> @suffix_overwrite(<8 x i32> %vin, i32 %a, i32 %b, i32 %c, i32 %d) {
; CHECK-LABEL: suffix_overwrite:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vslideup.vi v8, v10, 4
; CHECK-NEXT:    vmv.s.x v10, a1
; CHECK-NEXT:    vsetivli zero, 6, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 5
; CHECK-NEXT:    vmv.s.x v10, a2
; CHECK-NEXT:    vsetivli zero, 7, e32, m2, tu, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 6
; CHECK-NEXT:    vmv.s.x v10, a3
; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 7
; CHECK-NEXT:    ret
  %v0 = insertelement <8 x i32> %vin, i32 %a, i32 4
  %v1 = insertelement <8 x i32> %v0, i32 %b, i32 5
  %v2 = insertelement <8 x i32> %v1, i32 %c, i32 6
  %v3 = insertelement <8 x i32> %v2, i32 %d, i32 7
  ret <8 x i32> %v3
}

define <4 x i64> @v4xi64_exact(i64 %a, i64 %b, i64 %c, i64 %d) vscale_range(2,2) {
; RV32-LABEL: v4xi64_exact:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a4
; RV32-NEXT:    vslide1down.vx v8, v8, a5
; RV32-NEXT:    vslide1down.vx v8, v8, a6
; RV32-NEXT:    vslide1down.vx v9, v8, a7
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    ret
;
; RV64V-LABEL: v4xi64_exact:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a2
; RV64V-NEXT:    vslide1down.vx v9, v8, a3
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    vslide1down.vx v8, v8, a1
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: v4xi64_exact:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    sd a4, 24(a0)
; RV64ZVE32-NEXT:    sd a3, 16(a0)
; RV64ZVE32-NEXT:    sd a2, 8(a0)
; RV64ZVE32-NEXT:    sd a1, 0(a0)
; RV64ZVE32-NEXT:    ret
  %v1 = insertelement <4 x i64> poison, i64 %a, i32 0
  %v2 = insertelement <4 x i64> %v1, i64 %b, i32 1
  %v3 = insertelement <4 x i64> %v2, i64 %c, i32 2
  %v4 = insertelement <4 x i64> %v3, i64 %d, i32 3
  ret <4 x i64> %v4
}
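
; vscale_range(2,2) pins VLEN at 128, so each m1 register holds exactly two
; i64 elements and the build_vector splits across whole registers.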

define <8 x i64> @v8xi64_exact(i64 %a, i64 %b, i64 %c, i64 %d, i64 %e, i64 %f, i64 %g, i64 %h) vscale_range(2,2) {
; RV32-LABEL: v8xi64_exact:
; RV32:       # %bb.0:
; RV32-NEXT:    lw t0, 28(sp)
; RV32-NEXT:    lw t1, 24(sp)
; RV32-NEXT:    lw t2, 20(sp)
; RV32-NEXT:    lw t3, 12(sp)
; RV32-NEXT:    lw t4, 8(sp)
; RV32-NEXT:    lw t5, 4(sp)
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a4
; RV32-NEXT:    vslide1down.vx v8, v8, a5
; RV32-NEXT:    vslide1down.vx v8, v8, a6
; RV32-NEXT:    vslide1down.vx v9, v8, a7
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vlse32.v v10, (sp), zero
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    addi a0, sp, 16
; RV32-NEXT:    vlse32.v v11, (a0), zero
; RV32-NEXT:    vslide1down.vx v10, v10, t5
; RV32-NEXT:    vslide1down.vx v10, v10, t4
; RV32-NEXT:    vslide1down.vx v10, v10, t3
; RV32-NEXT:    vslide1down.vx v11, v11, t2
; RV32-NEXT:    vslide1down.vx v11, v11, t1
; RV32-NEXT:    vslide1down.vx v11, v11, t0
; RV32-NEXT:    ret
;
; RV64V-LABEL: v8xi64_exact:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a2
; RV64V-NEXT:    vslide1down.vx v9, v8, a3
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    vslide1down.vx v8, v8, a1
; RV64V-NEXT:    vmv.v.x v10, a4
; RV64V-NEXT:    vslide1down.vx v10, v10, a5
; RV64V-NEXT:    vmv.v.x v11, a6
; RV64V-NEXT:    vslide1down.vx v11, v11, a7
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: v8xi64_exact:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    ld t0, 0(sp)
; RV64ZVE32-NEXT:    sd t0, 56(a0)
; RV64ZVE32-NEXT:    sd a7, 48(a0)
; RV64ZVE32-NEXT:    sd a6, 40(a0)
; RV64ZVE32-NEXT:    sd a5, 32(a0)
; RV64ZVE32-NEXT:    sd a4, 24(a0)
; RV64ZVE32-NEXT:    sd a3, 16(a0)
; RV64ZVE32-NEXT:    sd a2, 8(a0)
; RV64ZVE32-NEXT:    sd a1, 0(a0)
; RV64ZVE32-NEXT:    ret
  %v1 = insertelement <8 x i64> poison, i64 %a, i32 0
  %v2 = insertelement <8 x i64> %v1, i64 %b, i32 1
  %v3 = insertelement <8 x i64> %v2, i64 %c, i32 2
  %v4 = insertelement <8 x i64> %v3, i64 %d, i32 3
  %v5 = insertelement <8 x i64> %v4, i64 %e, i32 4
  %v6 = insertelement <8 x i64> %v5, i64 %f, i32 5
  %v7 = insertelement <8 x i64> %v6, i64 %g, i32 6
  %v8 = insertelement <8 x i64> %v7, i64 %h, i32 7
  ret <8 x i64> %v8
}

define <8 x i64> @v8xi64_exact_equal_halves(i64 %a, i64 %b, i64 %c, i64 %d) vscale_range(2,2) {
; RV32-LABEL: v8xi64_exact_equal_halves:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a4
; RV32-NEXT:    vslide1down.vx v8, v8, a5
; RV32-NEXT:    vslide1down.vx v8, v8, a6
; RV32-NEXT:    vslide1down.vx v9, v8, a7
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    vmv.v.v v10, v8
; RV32-NEXT:    vmv.v.v v11, v9
; RV32-NEXT:    ret
;
; RV64V-LABEL: v8xi64_exact_equal_halves:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a2
; RV64V-NEXT:    vslide1down.vx v9, v8, a3
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    vslide1down.vx v8, v8, a1
; RV64V-NEXT:    vmv.v.v v10, v8
; RV64V-NEXT:    vmv.v.v v11, v9
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: v8xi64_exact_equal_halves:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    sd a4, 56(a0)
; RV64ZVE32-NEXT:    sd a3, 48(a0)
; RV64ZVE32-NEXT:    sd a2, 40(a0)
; RV64ZVE32-NEXT:    sd a1, 32(a0)
; RV64ZVE32-NEXT:    sd a4, 24(a0)
; RV64ZVE32-NEXT:    sd a3, 16(a0)
; RV64ZVE32-NEXT:    sd a2, 8(a0)
; RV64ZVE32-NEXT:    sd a1, 0(a0)
; RV64ZVE32-NEXT:    ret
  %v1 = insertelement <8 x i64> poison, i64 %a, i32 0
  %v2 = insertelement <8 x i64> %v1, i64 %b, i32 1
  %v3 = insertelement <8 x i64> %v2, i64 %c, i32 2
  %v4 = insertelement <8 x i64> %v3, i64 %d, i32 3
  %v5 = insertelement <8 x i64> %v4, i64 %a, i32 4
  %v6 = insertelement <8 x i64> %v5, i64 %b, i32 5
  %v7 = insertelement <8 x i64> %v6, i64 %c, i32 6
  %v8 = insertelement <8 x i64> %v7, i64 %d, i32 7
  ret <8 x i64> %v8
}
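
; When half of the build is undef, only the defined half is materialized.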

define <8 x i64> @v8xi64_exact_undef_suffix(i64 %a, i64 %b, i64 %c, i64 %d) vscale_range(2,2) {
; RV32-LABEL: v8xi64_exact_undef_suffix:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a4
; RV32-NEXT:    vslide1down.vx v8, v8, a5
; RV32-NEXT:    vslide1down.vx v8, v8, a6
; RV32-NEXT:    vslide1down.vx v9, v8, a7
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    ret
;
; RV64V-LABEL: v8xi64_exact_undef_suffix:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a2
; RV64V-NEXT:    vslide1down.vx v9, v8, a3
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    vslide1down.vx v8, v8, a1
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: v8xi64_exact_undef_suffix:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    sd a4, 24(a0)
; RV64ZVE32-NEXT:    sd a3, 16(a0)
; RV64ZVE32-NEXT:    sd a2, 8(a0)
; RV64ZVE32-NEXT:    sd a1, 0(a0)
; RV64ZVE32-NEXT:    ret
  %v1 = insertelement <8 x i64> poison, i64 %a, i32 0
  %v2 = insertelement <8 x i64> %v1, i64 %b, i32 1
  %v3 = insertelement <8 x i64> %v2, i64 %c, i32 2
  %v4 = insertelement <8 x i64> %v3, i64 %d, i32 3
  ret <8 x i64> %v4
}

define <8 x i64> @v8xi64_exact_undef_prefix(i64 %a, i64 %b, i64 %c, i64 %d) vscale_range(2,2) {
; RV32-LABEL: v8xi64_exact_undef_prefix:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v8, a4
; RV32-NEXT:    vslide1down.vx v8, v8, a5
; RV32-NEXT:    vslide1down.vx v8, v8, a6
; RV32-NEXT:    vslide1down.vx v11, v8, a7
; RV32-NEXT:    vmv.v.x v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v10, v8, a3
; RV32-NEXT:    ret
;
; RV64V-LABEL: v8xi64_exact_undef_prefix:
; RV64V:       # %bb.0:
; RV64V-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64V-NEXT:    vmv.v.x v8, a2
; RV64V-NEXT:    vslide1down.vx v11, v8, a3
; RV64V-NEXT:    vmv.v.x v8, a0
; RV64V-NEXT:    vslide1down.vx v10, v8, a1
; RV64V-NEXT:    ret
;
; RV64ZVE32-LABEL: v8xi64_exact_undef_prefix:
; RV64ZVE32:       # %bb.0:
; RV64ZVE32-NEXT:    sd a4, 56(a0)
; RV64ZVE32-NEXT:    sd a3, 48(a0)
; RV64ZVE32-NEXT:    sd a2, 40(a0)
; RV64ZVE32-NEXT:    sd a1, 32(a0)
; RV64ZVE32-NEXT:    ret
  %v1 = insertelement <8 x i64> poison, i64 %a, i32 4
  %v2 = insertelement <8 x i64> %v1, i64 %b, i32 5
  %v3 = insertelement <8 x i64> %v2, i64 %c, i32 6
  %v4 = insertelement <8 x i64> %v3, i64 %d, i32 7
  ret <8 x i64> %v4
}