; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zfh,+zvfh | FileCheck -check-prefixes=CHECK,RV32 %s
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zfh,+zvfh | FileCheck -check-prefixes=CHECK,RV64 %s
; RUN: llc < %s -mtriple=riscv32 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB
; RUN: llc < %s -mtriple=riscv64 -mattr=+v,+zvbb,+zfh,+zvfh | FileCheck %s --check-prefix=ZVBB

; Integers

define <32 x i1> @vector_interleave_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b) {
; CHECK-LABEL: vector_interleave_v32i1_v16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vi v0, v8, 2
; CHECK-NEXT:    li a0, 32
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    vsetivli zero, 16, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v10, v8, 16
; CHECK-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v12, v8, v10
; CHECK-NEXT:    li a1, -1
; CHECK-NEXT:    vwmaccu.vx v12, a1, v10
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v12, 0
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v32i1_v16i1:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
; ZVBB-NEXT:    vslideup.vi v0, v8, 2
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; ZVBB-NEXT:    vmv.v.i v8, 0
; ZVBB-NEXT:    vmerge.vim v8, v8, 1, v0
; ZVBB-NEXT:    vsetivli zero, 16, e8, m2, ta, ma
; ZVBB-NEXT:    vslidedown.vi v10, v8, 16
; ZVBB-NEXT:    vsetivli zero, 16, e8, m1, ta, ma
; ZVBB-NEXT:    vwsll.vi v12, v10, 8
; ZVBB-NEXT:    vwaddu.wv v12, v12, v8
; ZVBB-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; ZVBB-NEXT:    vmsne.vi v0, v12, 0
; ZVBB-NEXT:    ret
  %res = call <32 x i1> @llvm.experimental.vector.interleave2.v32i1(<16 x i1> %a, <16 x i1> %b)
  ret <32 x i1> %res
}

define <16 x i16> @vector_interleave_v16i16_v8i16(<8 x i16> %a, <8 x i16> %b) {
; CHECK-LABEL: vector_interleave_v16i16_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v16i16_v8i16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
  ret <16 x i16> %res
}

define <8 x i32> @vector_interleave_v8i32_v4i32(<4 x i32> %a, <4 x i32> %b) {
; CHECK-LABEL: vector_interleave_v8i32_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v8i32_v4i32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZVBB-NEXT:    vwsll.vx v10, v9, a0
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <8 x i32> @llvm.experimental.vector.interleave2.v8i32(<4 x i32> %a, <4 x i32> %b)
  ret <8 x i32> %res
}

define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: vector_interleave_v4i64_v2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 2
; CHECK-NEXT:    lui a0, 12304
; CHECK-NEXT:    addi a0, a0, 512
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v10
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v4i64_v2i64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv1r.v v10, v9
; ZVBB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; ZVBB-NEXT:    vslideup.vi v8, v10, 2
; ZVBB-NEXT:    lui a0, 12304
; ZVBB-NEXT:    addi a0, a0, 512
; ZVBB-NEXT:    vmv.s.x v10, a0
; ZVBB-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vsext.vf2 v12, v10
; ZVBB-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
; ZVBB-NEXT:    vmv.v.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <4 x i64> @llvm.experimental.vector.interleave2.v4i64(<2 x i64> %a, <2 x i64> %b)
  ret <4 x i64> %res
}

declare <32 x i1> @llvm.experimental.vector.interleave2.v32i1(<16 x i1>, <16 x i1>)
declare <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16>, <8 x i16>)
declare <8 x i32> @llvm.experimental.vector.interleave2.v8i32(<4 x i32>, <4 x i32>)
declare <4 x i64> @llvm.experimental.vector.interleave2.v4i64(<2 x i64>, <2 x i64>)

; Floats

define <4 x half> @vector_interleave_v4f16_v2f16(<2 x half> %a, <2 x half> %b) {
; CHECK-LABEL: vector_interleave_v4f16_v2f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v4f16_v2f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv1r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <4 x half> @llvm.experimental.vector.interleave2.v4f16(<2 x half> %a, <2 x half> %b)
  ret <4 x half> %res
}

define <8 x half> @vector_interleave_v8f16_v4f16(<4 x half> %a, <4 x half> %b) {
; CHECK-LABEL: vector_interleave_v8f16_v4f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v8f16_v4f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv1r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half> %a, <4 x half> %b)
  ret <8 x half> %res
}

define <4 x float> @vector_interleave_v4f32_v2f32(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: vector_interleave_v4f32_v2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v4f32_v2f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; ZVBB-NEXT:    vwsll.vx v10, v9, a0
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv1r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float> %a, <2 x float> %b)
  ret <4 x float> %res
}

define <16 x half> @vector_interleave_v16f16_v8f16(<8 x half> %a, <8 x half> %b) {
; CHECK-LABEL: vector_interleave_v16f16_v8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v16f16_v8f16:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
; ZVBB-NEXT:    vwsll.vi v10, v9, 16
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <16 x half> @llvm.experimental.vector.interleave2.v16f16(<8 x half> %a, <8 x half> %b)
  ret <16 x half> %res
}

define <8 x float> @vector_interleave_v8f32_v4f32(<4 x float> %a, <4 x float> %b) {
; CHECK-LABEL: vector_interleave_v8f32_v4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT:    vwaddu.vv v10, v8, v9
; CHECK-NEXT:    li a0, -1
; CHECK-NEXT:    vwmaccu.vx v10, a0, v9
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v8f32_v4f32:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    li a0, 32
; ZVBB-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; ZVBB-NEXT:    vwsll.vx v10, v9, a0
; ZVBB-NEXT:    vwaddu.wv v10, v10, v8
; ZVBB-NEXT:    vmv2r.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float> %a, <4 x float> %b)
  ret <8 x float> %res
}

define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: vector_interleave_v4f64_v2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 2
; CHECK-NEXT:    lui a0, 12304
; CHECK-NEXT:    addi a0, a0, 512
; CHECK-NEXT:    vmv.s.x v10, a0
; CHECK-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT:    vsext.vf2 v12, v10
; CHECK-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT:    vrgatherei16.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
;
; ZVBB-LABEL: vector_interleave_v4f64_v2f64:
; ZVBB:       # %bb.0:
; ZVBB-NEXT:    vmv1r.v v10, v9
; ZVBB-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; ZVBB-NEXT:    vslideup.vi v8, v10, 2
; ZVBB-NEXT:    lui a0, 12304
; ZVBB-NEXT:    addi a0, a0, 512
; ZVBB-NEXT:    vmv.s.x v10, a0
; ZVBB-NEXT:    vsetvli zero, zero, e16, mf2, ta, ma
; ZVBB-NEXT:    vsext.vf2 v12, v10
; ZVBB-NEXT:    vsetvli zero, zero, e64, m2, ta, ma
; ZVBB-NEXT:    vrgatherei16.vv v10, v8, v12
; ZVBB-NEXT:    vmv.v.v v8, v10
; ZVBB-NEXT:    ret
  %res = call <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double> %a, <2 x double> %b)
  ret <4 x double> %res
}

declare <4 x half> @llvm.experimental.vector.interleave2.v4f16(<2 x half>, <2 x half>)
declare <8 x half> @llvm.experimental.vector.interleave2.v8f16(<4 x half>, <4 x half>)
declare <4 x float> @llvm.experimental.vector.interleave2.v4f32(<2 x float>, <2 x float>)
declare <16 x half> @llvm.experimental.vector.interleave2.v16f16(<8 x half>, <8 x half>)
declare <8 x float> @llvm.experimental.vector.interleave2.v8f32(<4 x float>, <4 x float>)
declare <4 x double> @llvm.experimental.vector.interleave2.v4f64(<2 x double>, <2 x double>)
;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; RV32: {{.*}}
; RV64: {{.*}}