; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck %s
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB

; Lowering of llvm.experimental.vector.interleave2.* for scalable vector types.
; Without Zvbb the lanes are combined with vwaddu.vv + vwmaccu.vx -1; with Zvbb
; the odd operand is shifted into the high half with vwsll and merged in with
; vwaddu.wv. i64/f64 elements are interleaved with vrgatherei16 instead.

; Integers

define <vscale x 32 x i1> @vector_interleave_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv32i1_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v10, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vwaddu.vv v16, v8, v12
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v16, a0, v12
; CHECK-NEXT:    vmsne.vi v8, v18, 0
; CHECK-NEXT:    vmsne.vi v0, v16, 0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; CHECK-NEXT:    vslideup.vx v0, v8, a0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i1_nxv16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vmv.v.i v10, 0
; ZVBB-NEXT:    vmerge.vim v12, v10, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v8, v10, 1, v0
; ZVBB-NEXT:    vwsll.vi v16, v8, 8
; ZVBB-NEXT:    vwaddu.wv v16, v16, v12
; ZVBB-NEXT:    vmsne.vi v8, v18, 0
; ZVBB-NEXT:    vmsne.vi v0, v16, 0
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    add a1, a0, a0
; ZVBB-NEXT:    vsetvli zero, a1, e8, mf2, tu, ma
; ZVBB-NEXT:    vslideup.vx v0, v8, a0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i1> @llvm.experimental.vector.interleave2.nxv32i1(<vscale x 16 x i1> %a, <vscale x 16 x i1> %b)
  ret <vscale x 32 x i1> %res
}

define <vscale x 32 x i8> @vector_interleave_nxv32i8_nxv16i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv32i8_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i8_nxv16i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 8
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i8> @llvm.experimental.vector.interleave2.nxv32i8(<vscale x 16 x i8> %a, <vscale x 16 x i8> %b)
  ret <vscale x 32 x i8> %res
}

define <vscale x 16 x i16> @vector_interleave_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv16i16_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i16_nxv8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 16
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
  ret <vscale x 16 x i16> %res
}

define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv8i32_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8i32_nxv4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v10, a0
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b)
  ret <vscale x 8 x i32> %res
}

define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vand.vi v13, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v13, 0
; CHECK-NEXT:    vsrl.vi v16, v12, 1
; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4i64_nxv2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b)
  ret <vscale x 4 x i64> %res
}

declare <vscale x 32 x i1> @llvm.experimental.vector.interleave2.nxv32i1(<vscale x 16 x i1>, <vscale x 16 x i1>)
declare <vscale x 32 x i8> @llvm.experimental.vector.interleave2.nxv32i8(<vscale x 16 x i8>, <vscale x 16 x i8>)
declare <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16>, <vscale x 8 x i16>)
declare <vscale x 8 x i32> @llvm.experimental.vector.interleave2.nxv8i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 4 x i64> @llvm.experimental.vector.interleave2.nxv4i64(<vscale x 2 x i64>, <vscale x 2 x i64>)

define <vscale x 128 x i1> @vector_interleave_nxv128i1_nxv64i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b) {
; CHECK-LABEL: vector_interleave_nxv128i1_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v24, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v16, v24, 1, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v24, v8, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v24, a0, v16
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v24, 0
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v24, v12, v20
; CHECK-NEXT:    vwmaccu.vx v24, a0, v20
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v24, 0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i1_nxv64i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmv.v.i v24, 0
; ZVBB-NEXT:    vmerge.vim v16, v24, 1, v0
; ZVBB-NEXT:    vmv1r.v v0, v8
; ZVBB-NEXT:    vmerge.vim v8, v24, 1, v0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v8, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v16
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v0, v24, 0
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v24, v12, 8
; ZVBB-NEXT:    vwaddu.wv v24, v24, v20
; ZVBB-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; ZVBB-NEXT:    vmsne.vi v8, v24, 0
; ZVBB-NEXT:    ret
  %res = call <vscale x 128 x i1> @llvm.experimental.vector.interleave2.nxv128i1(<vscale x 64 x i1> %a, <vscale x 64 x i1> %b)
  ret <vscale x 128 x i1> %res
}

define <vscale x 128 x i8> @vector_interleave_nxv128i8_nxv64i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b) {
; CHECK-LABEL: vector_interleave_nxv128i8_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv128i8_nxv64i8:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 8
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 8
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 128 x i8> @llvm.experimental.vector.interleave2.nxv128i8(<vscale x 64 x i8> %a, <vscale x 64 x i8> %b)
  ret <vscale x 128 x i8> %res
}

define <vscale x 64 x i16> @vector_interleave_nxv64i16_nxv32i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b) {
; CHECK-LABEL: vector_interleave_nxv64i16_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64i16_nxv32i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x i16> @llvm.experimental.vector.interleave2.nxv64i16(<vscale x 32 x i16> %a, <vscale x 32 x i16> %b)
  ret <vscale x 64 x i16> %res
}

define <vscale x 32 x i32> @vector_interleave_nxv32i32_nxv16i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b) {
; CHECK-LABEL: vector_interleave_nxv32i32_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32i32_nxv16i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v24, v16, a0
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x i32> @llvm.experimental.vector.interleave2.nxv32i32(<vscale x 16 x i32> %a, <vscale x 16 x i32> %b)
  ret <vscale x 32 x i32> %res
}

define <vscale x 16 x i64> @vector_interleave_nxv16i64_nxv8i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b) {
; CHECK-LABEL: vector_interleave_nxv16i64_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv8r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v24
; CHECK-NEXT:    vand.vi v26, v24, 1
; CHECK-NEXT:    vmsne.vi v10, v26, 0
; CHECK-NEXT:    vsrl.vi v8, v24, 1
; CHECK-NEXT:    vmv8r.v v24, v0
; CHECK-NEXT:    vmv4r.v v12, v4
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vmv4r.v v28, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v0, v24, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vrgatherei16.vv v24, v16, v8
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv.v.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16i64_nxv8i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    .cfi_def_cfa_offset 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVBB-NEXT:    vmv8r.v v0, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v24
; ZVBB-NEXT:    vand.vi v26, v24, 1
; ZVBB-NEXT:    vmsne.vi v10, v26, 0
; ZVBB-NEXT:    vsrl.vi v8, v24, 1
; ZVBB-NEXT:    vmv8r.v v24, v0
; ZVBB-NEXT:    vmv4r.v v12, v4
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64> %a, <vscale x 8 x i64> %b)
  ret <vscale x 16 x i64> %res
}

declare <vscale x 128 x i1> @llvm.experimental.vector.interleave2.nxv128i1(<vscale x 64 x i1>, <vscale x 64 x i1>)
declare <vscale x 128 x i8> @llvm.experimental.vector.interleave2.nxv128i8(<vscale x 64 x i8>, <vscale x 64 x i8>)
declare <vscale x 64 x i16> @llvm.experimental.vector.interleave2.nxv64i16(<vscale x 32 x i16>, <vscale x 32 x i16>)
declare <vscale x 32 x i32> @llvm.experimental.vector.interleave2.nxv32i32(<vscale x 16 x i32>, <vscale x 16 x i32>)
declare <vscale x 16 x i64> @llvm.experimental.vector.interleave2.nxv16i64(<vscale x 8 x i64>, <vscale x 8 x i64>)

; Floats

define <vscale x 4 x half> @vector_interleave_nxv4f16_nxv2f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv4f16_nxv2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v10, v8, a0
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f16_nxv2f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vslidedown.vx v8, v10, a0
; ZVBB-NEXT:    add a1, a0, a0
; ZVBB-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; ZVBB-NEXT:    vslideup.vx v10, v8, a0
; ZVBB-NEXT:    vmv1r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x half> @llvm.experimental.vector.interleave2.nxv4f16(<vscale x 2 x half> %a, <vscale x 2 x half> %b)
  ret <vscale x 4 x half> %res
}

define <vscale x 8 x half> @vector_interleave_nxv8f16_nxv4f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv8f16_nxv4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8f16_nxv4f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m1, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x half> @llvm.experimental.vector.interleave2.nxv8f16(<vscale x 4 x half> %a, <vscale x 4 x half> %b)
  ret <vscale x 8 x half> %res
}

define <vscale x 4 x float> @vector_interleave_nxv4f32_nxv2f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv4f32_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f32_nxv2f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; ZVBB-NEXT:    vwsll.vx v10, v9, a0
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x float> @llvm.experimental.vector.interleave2.nxv4f32(<vscale x 2 x float> %a, <vscale x 2 x float> %b)
  ret <vscale x 4 x float> %res
}

define <vscale x 16 x half> @vector_interleave_nxv16f16_nxv8f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv16f16_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16f16_nxv8f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 16
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x half> @llvm.experimental.vector.interleave2.nxv16f16(<vscale x 8 x half> %a, <vscale x 8 x half> %b)
  ret <vscale x 16 x half> %res
}

define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv8f32_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v12, a0, v10
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv8f32_nxv4f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m2, ta, ma
; ZVBB-NEXT:    vwsll.vx v12, v10, a0
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vmv4r.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 8 x float> @llvm.experimental.vector.interleave2.nxv8f32(<vscale x 4 x float> %a, <vscale x 4 x float> %b)
  ret <vscale x 8 x float> %res
}

define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vand.vi v13, v12, 1
; CHECK-NEXT:    vmsne.vi v0, v13, 0
; CHECK-NEXT:    vsrl.vi v16, v12, 1
; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv4f64_nxv2f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 2
; ZVBB-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; ZVBB-NEXT:    vid.v v12
; ZVBB-NEXT:    vand.vi v13, v12, 1
; ZVBB-NEXT:    vmsne.vi v0, v13, 0
; ZVBB-NEXT:    vsrl.vi v16, v12, 1
; ZVBB-NEXT:    vadd.vx v16, v16, a0, v0.t
; ZVBB-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v12, v8, v16
; ZVBB-NEXT:    vmv.v.v v8, v12
; ZVBB-NEXT:    ret
  %res = call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b)
  ret <vscale x 4 x double> %res
}

declare <vscale x 4 x half> @llvm.experimental.vector.interleave2.nxv4f16(<vscale x 2 x half>, <vscale x 2 x half>)
declare <vscale x 8 x half> @llvm.experimental.vector.interleave2.nxv8f16(<vscale x 4 x half>, <vscale x 4 x half>)
declare <vscale x 4 x float> @llvm.experimental.vector.interleave2.nxv4f32(<vscale x 2 x float>, <vscale x 2 x float>)
declare <vscale x 16 x half> @llvm.experimental.vector.interleave2.nxv16f16(<vscale x 8 x half>, <vscale x 8 x half>)
declare <vscale x 8 x float> @llvm.experimental.vector.interleave2.nxv8f32(<vscale x 4 x float>, <vscale x 4 x float>)
declare <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)

define <vscale x 64 x half> @vector_interleave_nxv64f16_nxv32f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b) {
; CHECK-LABEL: vector_interleave_nxv64f16_nxv32f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv64f16_nxv32f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv8r.v v24, v8
; ZVBB-NEXT:    vsetvli a0, zero, e16, m4, ta, ma
; ZVBB-NEXT:    vwsll.vi v8, v16, 16
; ZVBB-NEXT:    vwaddu.wv v8, v8, v24
; ZVBB-NEXT:    vwsll.vi v0, v20, 16
; ZVBB-NEXT:    vwaddu.wv v0, v0, v28
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half> %a, <vscale x 32 x half> %b)
  ret <vscale x 64 x half> %res
}

define <vscale x 32 x float> @vector_interleave_nxv32f32_nxv16f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b) {
; CHECK-LABEL: vector_interleave_nxv32f32_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv8r.v v24, v8
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vwaddu.vv v8, v24, v16
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
; CHECK-NEXT:    vwaddu.vv v0, v28, v20
; CHECK-NEXT:    vwmaccu.vx v0, a0, v20
; CHECK-NEXT:    vmv8r.v v16, v0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv32f32_nxv16f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; ZVBB-NEXT:    vwsll.vx v24, v16, a0
; ZVBB-NEXT:    vwaddu.wv v24, v24, v8
; ZVBB-NEXT:    vwsll.vx v0, v20, a0
; ZVBB-NEXT:    vwaddu.wv v0, v0, v12
; ZVBB-NEXT:    vmv8r.v v8, v24
; ZVBB-NEXT:    vmv8r.v v16, v0
; ZVBB-NEXT:    ret
  %res = call <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float> %a, <vscale x 16 x float> %b)
  ret <vscale x 32 x float> %res
}

define <vscale x 16 x double> @vector_interleave_nxv16f64_nxv8f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b) {
; CHECK-LABEL: vector_interleave_nxv16f64_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    sub sp, sp, a0
; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; CHECK-NEXT:    vmv8r.v v0, v8
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vid.v v24
; CHECK-NEXT:    vand.vi v26, v24, 1
; CHECK-NEXT:    vmsne.vi v10, v26, 0
; CHECK-NEXT:    vsrl.vi v8, v24, 1
; CHECK-NEXT:    vmv8r.v v24, v0
; CHECK-NEXT:    vmv4r.v v12, v4
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vmv4r.v v28, v16
; CHECK-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v0, v24, v8
; CHECK-NEXT:    addi a0, sp, 16
; CHECK-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; CHECK-NEXT:    vmv4r.v v16, v12
; CHECK-NEXT:    vrgatherei16.vv v24, v16, v8
; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; CHECK-NEXT:    vmv.v.v v16, v24
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    slli a0, a0, 3
; CHECK-NEXT:    add sp, sp, a0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_nxv16f64_nxv8f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    addi sp, sp, -16
; ZVBB-NEXT:    .cfi_def_cfa_offset 16
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    sub sp, sp, a0
; ZVBB-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
; ZVBB-NEXT:    vmv8r.v v0, v8
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    srli a0, a0, 1
; ZVBB-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; ZVBB-NEXT:    vid.v v24
; ZVBB-NEXT:    vand.vi v26, v24, 1
; ZVBB-NEXT:    vmsne.vi v10, v26, 0
; ZVBB-NEXT:    vsrl.vi v8, v24, 1
; ZVBB-NEXT:    vmv8r.v v24, v0
; ZVBB-NEXT:    vmv4r.v v12, v4
; ZVBB-NEXT:    vmv1r.v v0, v10
; ZVBB-NEXT:    vadd.vx v8, v8, a0, v0.t
; ZVBB-NEXT:    vmv4r.v v28, v16
; ZVBB-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v0, v24, v8
; ZVBB-NEXT:    addi a0, sp, 16
; ZVBB-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
; ZVBB-NEXT:    vmv4r.v v16, v12
; ZVBB-NEXT:    vrgatherei16.vv v24, v16, v8
; ZVBB-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; ZVBB-NEXT:    vmv.v.v v16, v24
; ZVBB-NEXT:    csrr a0, vlenb
; ZVBB-NEXT:    slli a0, a0, 3
; ZVBB-NEXT:    add sp, sp, a0
; ZVBB-NEXT:    addi sp, sp, 16
; ZVBB-NEXT:    ret
  %res = call <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double> %a, <vscale x 8 x double> %b)
  ret <vscale x 16 x double> %res
}

declare <vscale x 64 x half> @llvm.experimental.vector.interleave2.nxv64f16(<vscale x 32 x half>, <vscale x 32 x half>)
declare <vscale x 32 x float> @llvm.experimental.vector.interleave2.nxv32f32(<vscale x 16 x float>, <vscale x 16 x float>)
declare <vscale x 16 x double> @llvm.experimental.vector.interleave2.nxv16f64(<vscale x 8 x double>, <vscale x 8 x double>)