; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s

define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v10, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
  ret <vscale x 8 x i32> %v
}

define <vscale x 8 x i32> @insert_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv8i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v11, v12
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
  ret <vscale x 8 x i32> %v
}

define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_0(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv1i8_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
  ret <vscale x 4 x i8> %v
}

define <vscale x 4 x i8> @insert_nxv1i8_nxv4i8_3(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv1i8_nxv4i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    slli a1, a0, 1
; CHECK-NEXT:    add a1, a1, a0
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v9, a1
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
  ret <vscale x 4 x i8> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv4r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 0)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv8i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv4r.v v12, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 8 x i32> %subvec, i64 8)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 0)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v10, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 4)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v12, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 8)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv4i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v14, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 4 x i32> %subvec, i64 12)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 0)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 2)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 4)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v11, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 6)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v12, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 8)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v13, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 10)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v14, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 12)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv2i32_14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v15, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 2 x i32> %subvec, i64 14)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 0)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 1)
  ret <vscale x 16 x i32> %v
}

define <vscale x 16 x i32> @insert_nxv16i32_nxv1i32_6(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec) {
; CHECK-LABEL: insert_nxv16i32_nxv1i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v11, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, <vscale x 1 x i32> %subvec, i64 6)
  ret <vscale x 16 x i32> %v
}
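
; In the <vscale x 1 x i8> into <vscale x 16 x i8> tests below, the subvector
; covers vlenb/8 elements. Index 0 only needs a tail-undisturbed vmv.v.v over
; those elements; a non-zero index slides the subvector up within the vector
; register that holds the target position, again tail-undisturbed so the
; surrounding elements of %vec are preserved.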

define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_0(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv16i8_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 0)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_1(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv16i8_nxv1i8_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 1)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_2(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv16i8_nxv1i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 2)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_3(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv16i8_nxv1i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    slli a1, a0, 1
; CHECK-NEXT:    add a1, a1, a0
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 3)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_7(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv16i8_nxv1i8_7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    sub a1, a0, a1
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a1
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 7)
  ret <vscale x 16 x i8> %v
}

define <vscale x 16 x i8> @insert_nxv16i8_nxv1i8_15(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec) {
; CHECK-LABEL: insert_nxv16i8_nxv1i8_15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    sub a1, a0, a1
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v9, v10, a1
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8> %vec, <vscale x 1 x i8> %subvec, i64 15)
  ret <vscale x 16 x i8> %v
}

define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_0(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_nxv2f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 0)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_2(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_nxv2f16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 2)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_nxv2f16_26:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v14, v16, a0
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half> %vec, <vscale x 2 x half> %subvec, i64 26)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_0(<vscale x 1 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
  ret <vscale x 32 x half> %v
}
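
; When inserting into an undef vector only the inserted lanes matter: index 0
; needs no instructions at all (above), and a non-zero index can use a
; tail-agnostic vslideup because nothing outside the subvector has to be
; preserved.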

define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half> %subvec) {
; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_26:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v14, v8, a0
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
  ret <vscale x 32 x half> %v
}

define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_0(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv) {
; CHECK-LABEL: insert_nxv32i1_nxv8i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v0, v8
; CHECK-NEXT:    ret
  %vec = call <vscale x 32 x i1> @llvm.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 0)
  ret <vscale x 32 x i1> %vec
}

define <vscale x 32 x i1> @insert_nxv32i1_nxv8i1_8(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv) {
; CHECK-LABEL: insert_nxv32i1_nxv8i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v0, v8, a0
; CHECK-NEXT:    ret
  %vec = call <vscale x 32 x i1> @llvm.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1> %v, <vscale x 8 x i1> %sv, i64 8)
  ret <vscale x 32 x i1> %vec
}

define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_0(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv4i1_nxv1i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
; CHECK-NEXT:    vmv.v.v v9, v8
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %vec = call <vscale x 4 x i1> @llvm.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv, i64 0)
  ret <vscale x 4 x i1> %vec
}

define <vscale x 4 x i1> @insert_nxv4i1_nxv1i1_2(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv) {
; CHECK-LABEL: insert_nxv4i1_nxv1i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v9, 0
; CHECK-NEXT:    vmerge.vim v9, v9, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a1
; CHECK-NEXT:    vsetvli a2, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v9, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %vec = call <vscale x 4 x i1> @llvm.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1> %v, <vscale x 1 x i1> %sv, i64 2)
  ret <vscale x 4 x i1> %vec
}

declare <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64>, <vscale x 8 x i64>, i64)

define void @insert_nxv8i64_nxv16i64(<vscale x 8 x i64> %sv0, <vscale x 8 x i64> %sv1, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs8r.v v16, (a0)
; CHECK-NEXT:    ret
  %v0 = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
  %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> %v0, <vscale x 8 x i64> %sv1, i64 8)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_nxv8i64_nxv16i64_lo(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64_lo:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 0)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

define void @insert_nxv8i64_nxv16i64_hi(<vscale x 8 x i64> %sv0, <vscale x 16 x i64>* %out) {
; CHECK-LABEL: insert_nxv8i64_nxv16i64_hi:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a1, vlenb
; CHECK-NEXT:    slli a1, a1, 3
; CHECK-NEXT:    add a0, a0, a1
; CHECK-NEXT:    vs8r.v v8, (a0)
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i64> @llvm.vector.insert.nxv8i64.nxv16i64(<vscale x 16 x i64> undef, <vscale x 8 x i64> %sv0, i64 8)
  store <vscale x 16 x i64> %v, <vscale x 16 x i64>* %out
  ret void
}

; We should be able to widen the <3 x i64> subvector to a <4 x i64> here because
; we know that the minimum vscale is 2
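; (With vscale_range(2,1024) from attribute #0, <vscale x 2 x i64> always holds
; at least 4 i64 elements, so the widened <4 x i64> subvector at index 0 already
; sits in the result registers and the function body is just a ret.)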
define <vscale x 2 x i64> @insert_nxv2i64_nxv3i64(<3 x i64> %sv) #0 {
; CHECK-LABEL: insert_nxv2i64_nxv3i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %vec = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v3i64(<vscale x 2 x i64> undef, <3 x i64> %sv, i64 0)
  ret <vscale x 2 x i64> %vec
}

; This shows a case where we were miscompiling because the index of the outer
; insert expects a scalable inner subvector while the innermost subvector is
; fixed length (see the note on index scaling at the end of this file).
; The code generated happens to be correct if VLEN=128, but is wrong if
; VLEN=256.
define <vscale x 8 x i32> @insert_insert_combine(<2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    ret
  %inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v2i32(<vscale x 4 x i32> undef, <2 x i32> %subvec, i64 0)
  %outer = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> %inner, i64 4)
  ret <vscale x 8 x i32> %outer
}

; We can combine these two (even with a non-zero index on the outer) because
; the vector must be an even multiple.
define <vscale x 8 x i32> @insert_insert_combine2(<vscale x 2 x i32> %subvec) {
; CHECK-LABEL: insert_insert_combine2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v8
; CHECK-NEXT:    ret
  %inner = call <vscale x 4 x i32> @llvm.vector.insert.nxv2i32.nxv4i32(<vscale x 4 x i32> undef, <vscale x 2 x i32> %subvec, i64 0)
  %outer = call <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32> undef, <vscale x 4 x i32> %inner, i64 4)
  ret <vscale x 8 x i32> %outer
}

attributes #0 = { vscale_range(2,1024) }

declare <vscale x 4 x i1> @llvm.vector.insert.nxv1i1.nxv4i1(<vscale x 4 x i1>, <vscale x 1 x i1>, i64)
declare <vscale x 32 x i1> @llvm.vector.insert.nxv8i1.nxv32i1(<vscale x 32 x i1>, <vscale x 8 x i1>, i64)
declare <vscale x 16 x i8> @llvm.vector.insert.nxv1i8.nxv16i8(<vscale x 16 x i8>, <vscale x 1 x i8>, i64)
declare <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half>, <vscale x 1 x half>, i64)
declare <vscale x 32 x half> @llvm.vector.insert.nxv2f16.nxv32f16(<vscale x 32 x half>, <vscale x 2 x half>, i64)
declare <vscale x 4 x i8> @llvm.vector.insert.nxv1i8.nxv4i8(<vscale x 4 x i8>, <vscale x 1 x i8>, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv2i32.nxv4i32(<vscale x 4 x i32>, <vscale x 2 x i32>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v2i32(<vscale x 4 x i32>, <2 x i32>, i64)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv2i32.nxv8i32(<vscale x 8 x i32>, <vscale x 2 x i32>, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.insert.nxv4i32.nxv8i32(<vscale x 8 x i32>, <vscale x 4 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv1i32.nxv16i32(<vscale x 16 x i32>, <vscale x 1 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv2i32.nxv16i32(<vscale x 16 x i32>, <vscale x 2 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv4i32.nxv16i32(<vscale x 16 x i32>, <vscale x 4 x i32>, i64 %idx)
declare <vscale x 16 x i32> @llvm.vector.insert.nxv8i32.nxv16i32(<vscale x 16 x i32>, <vscale x 8 x i32>, i64 %idx)
declare <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v3i64(<vscale x 2 x i64>, <3 x i64>, i64 %idx)
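
; Note on insert_insert_combine: an insert index is scaled by vscale when the
; subvector type is scalable (index 4 of the <vscale x 4 x i32> outer insert
; means runtime element 4 * vscale), but it is an absolute element number when
; the subvector is fixed length, so the outer index cannot simply be reused
; once the fixed-length inner subvector is folded through the combine.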