; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple riscv32 -mattr=+m,+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple riscv64 -mattr=+m,+d,+zfh,+zvfh,+v -verify-machineinstrs < %s | FileCheck %s

define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
  ret <vscale x 4 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_2(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 2)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_4(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 4)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
; CHECK-LABEL: extract_nxv8i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 6)
  ret <vscale x 2 x i32> %c
}

define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 8 x i32> %c
}

define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv8i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv4r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 8 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 4 x i32> %c
}

define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv4i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv2r.v v8, v14
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
  ret <vscale x 4 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_4(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v10
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 4)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_6(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v11
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 6)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_8(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v12
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 8)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_10(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_10:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v13
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 10)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_12(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_12:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v14
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 12)
  ret <vscale x 2 x i32> %c
}

define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv2i32_14:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v15
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 14)
  ret <vscale x 2 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %c
}
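;
; Extracts whose start index does not land on a vector register boundary are
; expected below to read VLENB, scale it to the element offset, and use
; vslidedown.vx within a single register group.
;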
define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_1(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 1)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_3(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 3)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_15(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_15:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v15, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 15)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_2(<vscale x 16 x i32> %vec) {
; CHECK-LABEL: extract_nxv16i32_nxv1i32_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 2)
  ret <vscale x 1 x i32> %c
}

define <vscale x 1 x i32> @extract_nxv2i32_nxv1i32_0(<vscale x 2 x i32> %vec) {
; CHECK-LABEL: extract_nxv2i32_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 0)
  ret <vscale x 1 x i32> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_2(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 2)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_4(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 4)
  ret <vscale x 2 x i8> %c
}
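;
; When the element offset is not a power-of-two multiple of vscale (offsets of
; 6, 7, and 3 elements below), the slide amount is built from vlenb >> 3 with
; a short scalar sequence (li/mul, sub, or slli/add) before the vslidedown.
;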
define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_6(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 6)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_8(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 8)
  ret <vscale x 2 x i8> %c
}

define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_22(<vscale x 32 x i8> %vec) {
; CHECK-LABEL: extract_nxv32i8_nxv2i8_22:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    li a1, 6
; CHECK-NEXT:    mul a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 22)
  ret <vscale x 2 x i8> %c
}

define <vscale x 1 x i8> @extract_nxv8i8_nxv1i8_7(<vscale x 8 x i8> %vec) {
; CHECK-LABEL: extract_nxv8i8_nxv1i8_7:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a1, a0, 3
; CHECK-NEXT:    sub a0, a0, a1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 7)
  ret <vscale x 1 x i8> %c
}

define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
; CHECK-LABEL: extract_nxv4i8_nxv1i8_3:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    slli a1, a0, 1
; CHECK-NEXT:    add a0, a1, a0
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 3)
  ret <vscale x 1 x i8> %c
}

define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
  ret <vscale x 2 x half> %c
}

define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_2(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 2)
  ret <vscale x 2 x half> %c
}

define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_4(<vscale x 16 x half> %vec) {
; CHECK-LABEL: extract_nxv2f16_nxv16f16_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v8, v9
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 4)
  ret <vscale x 2 x half> %c
}

define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_0(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv8i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 0)
  ret <vscale x 8 x i1> %c
}

define <vscale x 8 x i1> @extract_nxv64i1_nxv8i1_8(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv8i1_8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 3
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %mask, i64 8)
  ret <vscale x 8 x i1> %c
}

define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_0(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv2i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 0)
  ret <vscale x 2 x i1> %c
}
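;
; Mask (i1) extracts that cannot be handled with a plain slide of v0 are
; expected below to widen the mask to i8 (vmv.v.i + vmerge.vim), slide the
; widened vector down, and narrow back to a mask with vmsne.vi.
;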
define <vscale x 2 x i1> @extract_nxv64i1_nxv2i1_2(<vscale x 64 x i1> %mask) {
; CHECK-LABEL: extract_nxv64i1_nxv2i1_2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %c = call <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %mask, i64 2)
  ret <vscale x 2 x i1> %c
}

define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_0(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv4i1_nxv32i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 0)
  ret <vscale x 4 x i1> %c
}

define <vscale x 4 x i1> @extract_nxv4i1_nxv32i1_4(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv4i1_nxv32i1_4:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    vmerge.vim v8, v8, 1, v0
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 1
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %c = call <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %x, i64 4)
  ret <vscale x 4 x i1> %c
}

define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_0(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv16i1_nxv32i1_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 0)
  ret <vscale x 16 x i1> %c
}

define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_16(<vscale x 32 x i1> %x) {
; CHECK-LABEL: extract_nxv16i1_nxv32i1_16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v0, v0, a0
; CHECK-NEXT:    ret
  %c = call <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %x, i64 16)
  ret <vscale x 16 x i1> %c
}

;
; Extract f16 vector that needs widening from one that needs widening.
;
define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
  ret <vscale x 6 x half> %res
}

define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in) {
; CHECK-LABEL: extract_nxv6f16_nxv12f16_6:
; CHECK:       # %bb.0:
; CHECK-NEXT:    csrr a0, vlenb
; CHECK-NEXT:    srli a0, a0, 2
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v13, v10, a0
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v12, v9, a0
; CHECK-NEXT:    add a1, a0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
; CHECK-NEXT:    vslideup.vx v12, v10, a0
; CHECK-NEXT:    vmv2r.v v8, v12
; CHECK-NEXT:    ret
  %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
  ret <vscale x 6 x half> %res
}

declare <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half>, i64)

declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv4i8(<vscale x 4 x i8> %vec, i64 %idx)
declare <vscale x 1 x i8> @llvm.vector.extract.nxv1i8.nxv8i8(<vscale x 8 x i8> %vec, i64 %idx)
declare <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv2i32(<vscale x 2 x i32> %vec, i64 %idx)

declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 %idx)

declare <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)
declare <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 %idx)

declare <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 %idx)

declare <vscale x 4 x i1> @llvm.vector.extract.nxv4i1(<vscale x 32 x i1> %vec, i64 %idx)
declare <vscale x 16 x i1> @llvm.vector.extract.nxv16i1(<vscale x 32 x i1> %vec, i64 %idx)

declare <vscale x 2 x i1> @llvm.vector.extract.nxv2i1(<vscale x 64 x i1> %vec, i64 %idx)
declare <vscale x 8 x i1> @llvm.vector.extract.nxv8i1(<vscale x 64 x i1> %vec, i64 %idx)