; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -target-abi=ilp32d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -target-abi=lp64d -mattr=+v,+zfh,+zvfh,+f,+d -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

define <4 x i32> @insertelt_v4i32_0(<4 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = insertelement <4 x i32> %a, i32 %y, i32 0
  ret <4 x i32> %b
}

define <4 x i32> @insertelt_v4i32_3(<4 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v4i32_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 3
; CHECK-NEXT:    ret
  %b = insertelement <4 x i32> %a, i32 %y, i32 3
  ret <4 x i32> %b
}

define <4 x i32> @insertelt_v4i32_idx(<4 x i32> %a, i32 %y, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_v4i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a2, a1, 1
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %b = insertelement <4 x i32> %a, i32 %y, i32 %idx
  ret <4 x i32> %b
}

define <32 x i32> @insertelt_v32i32_0(<32 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v32i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = insertelement <32 x i32> %a, i32 %y, i32 0
  ret <32 x i32> %b
}

; FIXME: Should only require an m2 slideup
define <32 x i32> @insertelt_v32i32_4(<32 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v32i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 5, e32, m2, tu, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 4
; CHECK-NEXT:    ret
  %b = insertelement <32 x i32> %a, i32 %y, i32 4
  ret <32 x i32> %b
}

define <32 x i32> @insertelt_v32i32_31(<32 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v32i32_31:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    vslideup.vi v8, v16, 31
; CHECK-NEXT:    ret
  %b = insertelement <32 x i32> %a, i32 %y, i32 31
  ret <32 x i32> %b
}

define <32 x i32> @insertelt_v32i32_idx(<32 x i32> %a, i32 %y, i32 zeroext %idx) {
; CHECK-LABEL: insertelt_v32i32_idx:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a2, 32
; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT:    vmv.s.x v16, a0
; CHECK-NEXT:    addi a0, a1, 1
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a1
; CHECK-NEXT:    ret
  %b = insertelement <32 x i32> %a, i32 %y, i32 %idx
  ret <32 x i32> %b
}

define <64 x i32> @insertelt_v64i32_0(<64 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v64i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = insertelement <64 x i32> %a, i32 %y, i32 0
  ret <64 x i32> %b
}

define <64 x i32> @insertelt_v64i32_63(<64 x i32> %a, i32 %y) {
; CHECK-LABEL: insertelt_v64i32_63:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a1, 32
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT:    vmv.s.x v24, a0
; CHECK-NEXT:    vslideup.vi v16, v24, 31
; CHECK-NEXT:    ret
  %b = insertelement <64 x i32> %a, i32 %y, i32 63
  ret <64 x i32> %b
}
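
; A note on the expected sequences above: an insert at index 0 writes the
; scalar directly with vmv.s.x under a tail-undisturbed (tu) policy. For a
; nonzero index, the scalar is first placed in element 0 of a scratch register
; and then shifted into position with vslideup.vi (or vslideup.vx for a
; variable index) with VL set to idx + 1; tu keeps the elements past the
; insert point intact, and ta appears when the insert lands on the last
; element, where there is no tail left to preserve.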

define <64 x i32> @insertelt_v64i32_idx(<64 x i32> %a, i32 %y, i32 zeroext %idx) {
; RV32-LABEL: insertelt_v64i32_idx:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -384
; RV32-NEXT:    .cfi_def_cfa_offset 384
; RV32-NEXT:    sw ra, 380(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 376(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 384
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -128
; RV32-NEXT:    andi a1, a1, 63
; RV32-NEXT:    slli a1, a1, 2
; RV32-NEXT:    mv a2, sp
; RV32-NEXT:    add a1, a2, a1
; RV32-NEXT:    addi a3, sp, 128
; RV32-NEXT:    li a4, 32
; RV32-NEXT:    vsetvli zero, a4, e32, m8, ta, ma
; RV32-NEXT:    vse32.v v16, (a3)
; RV32-NEXT:    vse32.v v8, (a2)
; RV32-NEXT:    sw a0, 0(a1)
; RV32-NEXT:    vle32.v v8, (a2)
; RV32-NEXT:    vle32.v v16, (a3)
; RV32-NEXT:    addi sp, s0, -384
; RV32-NEXT:    lw ra, 380(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 376(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 384
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v64i32_idx:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -384
; RV64-NEXT:    .cfi_def_cfa_offset 384
; RV64-NEXT:    sd ra, 376(sp) # 8-byte Folded Spill
; RV64-NEXT:    sd s0, 368(sp) # 8-byte Folded Spill
; RV64-NEXT:    .cfi_offset ra, -8
; RV64-NEXT:    .cfi_offset s0, -16
; RV64-NEXT:    addi s0, sp, 384
; RV64-NEXT:    .cfi_def_cfa s0, 0
; RV64-NEXT:    andi sp, sp, -128
; RV64-NEXT:    andi a1, a1, 63
; RV64-NEXT:    slli a1, a1, 2
; RV64-NEXT:    mv a2, sp
; RV64-NEXT:    add a1, a2, a1
; RV64-NEXT:    addi a3, sp, 128
; RV64-NEXT:    li a4, 32
; RV64-NEXT:    vsetvli zero, a4, e32, m8, ta, ma
; RV64-NEXT:    vse32.v v16, (a3)
; RV64-NEXT:    vse32.v v8, (a2)
; RV64-NEXT:    sw a0, 0(a1)
; RV64-NEXT:    vle32.v v8, (a2)
; RV64-NEXT:    vle32.v v16, (a3)
; RV64-NEXT:    addi sp, s0, -384
; RV64-NEXT:    ld ra, 376(sp) # 8-byte Folded Reload
; RV64-NEXT:    ld s0, 368(sp) # 8-byte Folded Reload
; RV64-NEXT:    addi sp, sp, 384
; RV64-NEXT:    ret
  %b = insertelement <64 x i32> %a, i32 %y, i32 %idx
  ret <64 x i32> %b
}

; FIXME: This codegen needs to be improved. These tests previously asserted
; while type legalizing the i64 type on RV32.
define <4 x i64> @insertelt_v4i64(<4 x i64> %a, i64 %y) {
; RV32-LABEL: insertelt_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v8, a0
; RV32-NEXT:    vslide1down.vx v10, v10, a1
; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT:    vslideup.vi v8, v10, 3
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    vslideup.vi v8, v10, 3
; RV64-NEXT:    ret
  %b = insertelement <4 x i64> %a, i64 %y, i32 3
  ret <4 x i64> %b
}

define void @insertelt_v4i64_store(ptr %x, i64 %y) {
; RV32-LABEL: insertelt_v4i64_store:
; RV32:       # %bb.0:
; RV32-NEXT:    sw a2, 28(a0)
; RV32-NEXT:    sw a1, 24(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v4i64_store:
; RV64:       # %bb.0:
; RV64-NEXT:    sd a1, 24(a0)
; RV64-NEXT:    ret
  %a = load <4 x i64>, ptr %x
  %b = insertelement <4 x i64> %a, i64 %y, i32 3
  store <4 x i64> %b, ptr %x
  ret void
}
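
; RV32 has no 64-bit GPRs, so in insertelt_v4i64 above the i64 element is
; assembled from its two 32-bit halves with a pair of vslide1down.vx steps at
; e32 before being slid into place at e64. The *_store variants fold the
; insert away entirely: the element's address is a known constant offset, so
; the scalar is written straight to memory (two sw on RV32, one sd on RV64).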

; This uses a non-power of 2 type so that it isn't an MVT.
; The align keeps the type legalizer from using a 256 bit load so we must split
; it. This requires some operations that weren't supported for scalable vectors
; when this test was written.
define <3 x i64> @insertelt_v3i64(<3 x i64> %a, i64 %y) {
; RV32-LABEL: insertelt_v3i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v8, 3
; RV32-NEXT:    vmv.x.s a2, v9
; RV32-NEXT:    vslidedown.vi v9, v8, 2
; RV32-NEXT:    vmv.x.s a3, v9
; RV32-NEXT:    vslidedown.vi v9, v8, 1
; RV32-NEXT:    vmv.x.s a4, v9
; RV32-NEXT:    vmv.x.s a5, v8
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a5
; RV32-NEXT:    vslide1down.vx v8, v8, a4
; RV32-NEXT:    vslide1down.vx v8, v8, a3
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslidedown.vi v8, v8, 2
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v3i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT:    vslidedown.vi v9, v8, 1
; RV64-NEXT:    vmv.x.s a1, v9
; RV64-NEXT:    vmv.x.s a2, v8
; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT:    vmv.v.x v8, a2
; RV64-NEXT:    vslide1down.vx v8, v8, a1
; RV64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-NEXT:    vslidedown.vi v8, v8, 1
; RV64-NEXT:    ret
  %b = insertelement <3 x i64> %a, i64 %y, i32 2
  ret <3 x i64> %b
}

define void @insertelt_v3i64_store(ptr %x, i64 %y) {
; RV32-LABEL: insertelt_v3i64_store:
; RV32:       # %bb.0:
; RV32-NEXT:    sw a2, 20(a0)
; RV32-NEXT:    sw a1, 16(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v3i64_store:
; RV64:       # %bb.0:
; RV64-NEXT:    sd a1, 16(a0)
; RV64-NEXT:    ret
  %a = load <3 x i64>, ptr %x, align 8
  %b = insertelement <3 x i64> %a, i64 %y, i32 2
  store <3 x i64> %b, ptr %x
  ret void
}

define <16 x i8> @insertelt_v16i8(<16 x i8> %a, i8 %y) {
; CHECK-LABEL: insertelt_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 15, e8, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v9, a0
; CHECK-NEXT:    vslideup.vi v8, v9, 14
; CHECK-NEXT:    ret
  %b = insertelement <16 x i8> %a, i8 %y, i32 14
  ret <16 x i8> %b
}

define void @insertelt_v16i8_store(ptr %x, i8 %y) {
; CHECK-LABEL: insertelt_v16i8_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    sb a1, 14(a0)
; CHECK-NEXT:    ret
  %a = load <16 x i8>, ptr %x
  %b = insertelement <16 x i8> %a, i8 %y, i32 14
  store <16 x i8> %b, ptr %x
  ret void
}

define <32 x i16> @insertelt_v32i16(<32 x i16> %a, i16 %y, i32 %idx) {
; RV32-LABEL: insertelt_v32i16:
; RV32:       # %bb.0:
; RV32-NEXT:    li a2, 32
; RV32-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; RV32-NEXT:    vmv.s.x v12, a0
; RV32-NEXT:    addi a0, a1, 1
; RV32-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; RV32-NEXT:    vslideup.vx v8, v12, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v32i16:
; RV64:       # %bb.0:
; RV64-NEXT:    li a2, 32
; RV64-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
; RV64-NEXT:    vmv.s.x v12, a0
; RV64-NEXT:    slli a1, a1, 32
; RV64-NEXT:    srli a1, a1, 32
; RV64-NEXT:    addi a0, a1, 1
; RV64-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
; RV64-NEXT:    vslideup.vx v8, v12, a1
; RV64-NEXT:    ret
  %b = insertelement <32 x i16> %a, i16 %y, i32 %idx
  ret <32 x i16> %b
}

define void @insertelt_v32i16_store(ptr %x, i16 %y, i32 %idx) {
; CHECK-LABEL: insertelt_v32i16_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a2, a2, 31
; CHECK-NEXT:    slli a2, a2, 1
; CHECK-NEXT:    add a0, a0, a2
; CHECK-NEXT:    sh a1, 0(a0)
; CHECK-NEXT:    ret
  %a = load <32 x i16>, ptr %x
  %b = insertelement <32 x i16> %a, i16 %y, i32 %idx
  store <32 x i16> %b, ptr %x
  ret void
}

define <8 x float> @insertelt_v8f32(<8 x float> %a, float %y, i32 %idx) {
; RV32-LABEL: insertelt_v8f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
; RV32-NEXT:    vfmv.s.f v10, fa0
; RV32-NEXT:    addi a1, a0, 1
; RV32-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; RV32-NEXT:    vslideup.vx v8, v10, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v8f32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e32, m1, ta, ma
; RV64-NEXT:    vfmv.s.f v10, fa0
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e32, m2, tu, ma
; RV64-NEXT:    vslideup.vx v8, v10, a0
; RV64-NEXT:    ret
  %b = insertelement <8 x float> %a, float %y, i32 %idx
  ret <8 x float> %b
}

define void @insertelt_v8f32_store(ptr %x, float %y, i32 %idx) {
; CHECK-LABEL: insertelt_v8f32_store:
; CHECK:       # %bb.0:
; CHECK-NEXT:    andi a1, a1, 7
; CHECK-NEXT:    slli a1, a1, 2
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    fsw fa0, 0(a0)
; CHECK-NEXT:    ret
  %a = load <8 x float>, ptr %x
  %b = insertelement <8 x float> %a, float %y, i32 %idx
  store <8 x float> %b, ptr %x
  ret void
}

define <8 x i64> @insertelt_v8i64_0(<8 x i64> %a, ptr %x) {
; CHECK-LABEL: insertelt_v8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vsetivli zero, 8, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = insertelement <8 x i64> %a, i64 -1, i32 0
  ret <8 x i64> %b
}

define void @insertelt_v8i64_0_store(ptr %x) {
; RV32-LABEL: insertelt_v8i64_0_store:
; RV32:       # %bb.0:
; RV32-NEXT:    li a1, -1
; RV32-NEXT:    sw a1, 4(a0)
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v8i64_0_store:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    sd a1, 0(a0)
; RV64-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %b = insertelement <8 x i64> %a, i64 -1, i32 0
  store <8 x i64> %b, ptr %x
  ret void
}

define <8 x i64> @insertelt_v8i64(<8 x i64> %a, i32 %idx) {
; RV32-LABEL: insertelt_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v12, -1
; RV32-NEXT:    addi a1, a0, 1
; RV32-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; RV32-NEXT:    vslideup.vx v8, v12, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v12, -1
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; RV64-NEXT:    vslideup.vx v8, v12, a0
; RV64-NEXT:    ret
  %b = insertelement <8 x i64> %a, i64 -1, i32 %idx
  ret <8 x i64> %b
}

define void @insertelt_v8i64_store(ptr %x, i32 %idx) {
; RV32-LABEL: insertelt_v8i64_store:
; RV32:       # %bb.0:
; RV32-NEXT:    andi a1, a1, 7
; RV32-NEXT:    slli a1, a1, 3
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    li a1, -1
; RV32-NEXT:    sw a1, 4(a0)
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_v8i64_store:
; RV64:       # %bb.0:
; RV64-NEXT:    andi a1, a1, 7
; RV64-NEXT:    slli a1, a1, 3
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    li a1, -1
; RV64-NEXT:    sd a1, 0(a0)
; RV64-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %b = insertelement <8 x i64> %a, i64 -1, i32 %idx
  store <8 x i64> %b, ptr %x
  ret void
}

define <8 x i64> @insertelt_c6_v8i64_0(<8 x i64> %a, ptr %x) {
; CHECK-LABEL: insertelt_c6_v8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    li a0, 6
; CHECK-NEXT:    vsetivli zero, 8, e64, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %b = insertelement <8 x i64> %a, i64 6, i32 0
  ret <8 x i64> %b
}

define void @insertelt_c6_v8i64_0_store(ptr %x) {
; RV32-LABEL: insertelt_c6_v8i64_0_store:
; RV32:       # %bb.0:
; RV32-NEXT:    sw zero, 4(a0)
; RV32-NEXT:    li a1, 6
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_c6_v8i64_0_store:
; RV64:       # %bb.0:
; RV64-NEXT:    li a1, 6
; RV64-NEXT:    sd a1, 0(a0)
; RV64-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %b = insertelement <8 x i64> %a, i64 6, i32 0
  store <8 x i64> %b, ptr %x
  ret void
}

define <8 x i64> @insertelt_c6_v8i64(<8 x i64> %a, i32 %idx) {
; RV32-LABEL: insertelt_c6_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 8, e64, m1, ta, ma
; RV32-NEXT:    vmv.v.i v12, 6
; RV32-NEXT:    addi a1, a0, 1
; RV32-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; RV32-NEXT:    vslideup.vx v8, v12, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_c6_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m1, ta, ma
; RV64-NEXT:    vmv.v.i v12, 6
; RV64-NEXT:    slli a0, a0, 32
; RV64-NEXT:    srli a0, a0, 32
; RV64-NEXT:    addi a1, a0, 1
; RV64-NEXT:    vsetvli zero, a1, e64, m4, tu, ma
; RV64-NEXT:    vslideup.vx v8, v12, a0
; RV64-NEXT:    ret
  %b = insertelement <8 x i64> %a, i64 6, i32 %idx
  ret <8 x i64> %b
}

define void @insertelt_c6_v8i64_store(ptr %x, i32 %idx) {
; RV32-LABEL: insertelt_c6_v8i64_store:
; RV32:       # %bb.0:
; RV32-NEXT:    andi a1, a1, 7
; RV32-NEXT:    slli a1, a1, 3
; RV32-NEXT:    add a0, a0, a1
; RV32-NEXT:    sw zero, 4(a0)
; RV32-NEXT:    li a1, 6
; RV32-NEXT:    sw a1, 0(a0)
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_c6_v8i64_store:
; RV64:       # %bb.0:
; RV64-NEXT:    andi a1, a1, 7
; RV64-NEXT:    slli a1, a1, 3
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    li a1, 6
; RV64-NEXT:    sd a1, 0(a0)
; RV64-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %b = insertelement <8 x i64> %a, i64 6, i32 %idx
  store <8 x i64> %b, ptr %x
  ret void
}

; Test that using the result of an insertelement at element 0 in a later
; operation doesn't crash the compiler.
define void @insertelt_c6_v8i64_0_add(ptr %x, ptr %y) {
; CHECK-LABEL: insertelt_c6_v8i64_0_add:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v8, (a0)
; CHECK-NEXT:    li a2, 6
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a2
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vle64.v v12, (a1)
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    vse64.v v8, (a0)
; CHECK-NEXT:    ret
  %a = load <8 x i64>, ptr %x
  %b = insertelement <8 x i64> %a, i64 6, i32 0
  %c = load <8 x i64>, ptr %y
  %d = add <8 x i64> %b, %c
  store <8 x i64> %d, ptr %x
  ret void
}

; The next batch of tests covers inserts into high LMUL vectors when the
; exact VLEN is known.  FIXME: These can directly access the sub-registers
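
; vscale_range(2,2) pins VLEN to 128 bits, so an m1 register holds exactly
; four i32 elements and a constant index maps to a fixed position within a
; fixed register of the m4 group. Element 12 of <16 x i32>, for instance, is
; element 0 of v11, which is why insertelt_c12_v16xi32_exact below needs
; nothing more than a vmv.s.x into v11.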

define <16 x i32> @insertelt_c0_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c0_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v8, a0
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 0
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c1_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c1_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 1
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 1
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c2_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c2_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 2
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 2
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c3_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c3_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v8, v12, 3
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 3
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c12_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c12_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 16, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v11, a0
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 12
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c13_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c13_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v11, v12, 1
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 13
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c14_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c14_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 3, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v11, v12, 2
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 14
  ret <16 x i32> %v
}

define <16 x i32> @insertelt_c15_v16xi32_exact(<16 x i32> %vin, i32 %a) vscale_range(2,2) {
; CHECK-LABEL: insertelt_c15_v16xi32_exact:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, tu, ma
; CHECK-NEXT:    vmv.s.x v12, a0
; CHECK-NEXT:    vslideup.vi v11, v12, 3
; CHECK-NEXT:    ret
  %v = insertelement <16 x i32> %vin, i32 %a, i32 15
  ret <16 x i32> %v
}

define <8 x i64> @insertelt_c4_v8xi64_exact(<8 x i64> %vin, i64 %a) vscale_range(2,2) {
; RV32-LABEL: insertelt_c4_v8xi64_exact:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, tu, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vslide1down.vx v10, v10, a1
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_c4_v8xi64_exact:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 8, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v10, a0
; RV64-NEXT:    ret
  %v = insertelement <8 x i64> %vin, i64 %a, i32 4
  ret <8 x i64> %v
}

define <8 x i64> @insertelt_c5_v8xi64_exact(<8 x i64> %vin, i64 %a) vscale_range(2,2) {
; RV32-LABEL: insertelt_c5_v8xi64_exact:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v12, v8, a0
; RV32-NEXT:    vslide1down.vx v12, v12, a1
; RV32-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
; RV32-NEXT:    vslideup.vi v10, v12, 1
; RV32-NEXT:    ret
;
; RV64-LABEL: insertelt_c5_v8xi64_exact:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetivli zero, 2, e64, m1, tu, ma
; RV64-NEXT:    vmv.s.x v12, a0
; RV64-NEXT:    vslideup.vi v10, v12, 1
; RV64-NEXT:    ret
  %v = insertelement <8 x i64> %vin, i64 %a, i32 5
  ret <8 x i64> %v
}
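
; The same sub-register reasoning applies to the i64 cases above: with
; VLEN=128 each m1 register holds two i64 elements, so element 4 of <8 x i64>
; is element 0 of v10 and element 5 is element 1. RV64 writes element 4 with a
; single vmv.s.x into v10 and element 5 with a one-element vslideup; RV32,
; lacking a 64-bit GPR, assembles the halves with vslide1down.vx, either
; directly in v10 or in a scratch register that is then slid into the odd
; position.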