; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh -verify-machineinstrs < %s | FileCheck %s -check-prefixes=CHECK,RV64
; RUN: llc -mtriple=riscv32 -mattr=+v,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-V
; RUN: llc -mtriple=riscv64 -mattr=+v,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-V
; RUN: llc -mtriple=riscv32 -mattr=+zve32x,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X
; RUN: llc -mtriple=riscv64 -mattr=+zve32x,+zvfh,+zvkb -verify-machineinstrs < %s | FileCheck %s -check-prefixes=ZVKB-ZVE32X
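
; These tests cover fixed-vector shuffles that are equivalent to rotating each
; element group when the vector is reinterpreted at a wider element type
; (i8/i16/i32/i64).  With Zvkb the rotates can be selected as vror.vi/vrev8.v;
; otherwise they are lowered with shift/or or slide sequences.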

define <8 x i1> @shuffle_v8i1_as_i8_1(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 1
; CHECK-NEXT: vsll.vi v9, v0, 7
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_1:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 1
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_1:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 1
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
  ret <8 x i1> %shuffle
}

define <8 x i1> @shuffle_v8i1_as_i8_2(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 2
; CHECK-NEXT: vsll.vi v9, v0, 6
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_2:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 2
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_2:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 2
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
  ret <8 x i1> %shuffle
}

define <8 x i1> @shuffle_v8i1_as_i8_3(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_3:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 3
; CHECK-NEXT: vsll.vi v9, v0, 5
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_3:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 3
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_3:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 3
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2>
  ret <8 x i1> %shuffle
}

define <8 x i1> @shuffle_v8i1_as_i8_4(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_4:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 4
; CHECK-NEXT: vsll.vi v9, v0, 4
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_4:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 4
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_4:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 4
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  ret <8 x i1> %shuffle
}

define <8 x i1> @shuffle_v8i1_as_i8_5(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_5:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 5
; CHECK-NEXT: vsll.vi v9, v0, 3
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_5:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 5
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_5:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 5
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
  ret <8 x i1> %shuffle
}

define <8 x i1> @shuffle_v8i1_as_i8_6(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_6:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 6
; CHECK-NEXT: vsll.vi v9, v0, 2
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_6:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 6
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_6:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 6
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
  ret <8 x i1> %shuffle
}

define <8 x i1> @shuffle_v8i1_as_i8_7(<8 x i1> %v) {
; CHECK-LABEL: shuffle_v8i1_as_i8_7:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v0, 7
; CHECK-NEXT: vadd.vv v9, v0, v0
; CHECK-NEXT: vor.vv v0, v9, v8
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i1_as_i8_7:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; ZVKB-V-NEXT: vror.vi v0, v0, 7
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i1_as_i8_7:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 1, e8, mf4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v0, v0, 7
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i1> %v, <8 x i1> poison, <8 x i32> <i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
  ret <8 x i1> %shuffle
}
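
; The <8 x i8> shuffles below are byte rotates of the vector when it is viewed
; as a vector of i16/i32/i64 elements; with Zvkb they become vrev8.v or
; vror.vi, except on zve32x where the i64-element cases fall back to slide
; sequences because 64-bit elements are not available.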

define <8 x i8> @shuffle_v8i8_as_i16(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v9, v8, 8
; CHECK-NEXT: vsll.vi v8, v8, 8
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i16:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; ZVKB-V-NEXT: vrev8.v v8, v8
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i16:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 4, e16, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vrev8.v v8, v8
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i32_8(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i32_8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v9, v8, 8
; CHECK-NEXT: vsll.vi v8, v8, 24
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i32_8:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 8
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i32_8:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v8, v8, 8
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i32_16(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i32_16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v9, v8, 16
; CHECK-NEXT: vsll.vi v8, v8, 16
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i32_16:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 16
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i32_16:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v8, v8, 16
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i32_24(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i32_24:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v9, v8, 24
; CHECK-NEXT: vsll.vi v8, v8, 8
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i32_24:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 24
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i32_24:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 2, e32, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v8, v8, 24
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_8(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_8:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 7
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_8:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 8
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_8:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 1
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 7
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_16(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vslideup.vi v9, v8, 6
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_16:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 16
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_16:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 2
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 6
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_24(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_24:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 3
; CHECK-NEXT: vslideup.vi v9, v8, 5
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_24:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 24
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_24:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 3
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 5
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_32(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 4
; CHECK-NEXT: vslideup.vi v9, v8, 4
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_32:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 32
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_32:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 4
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 4
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_40(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_40:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 5
; CHECK-NEXT: vslideup.vi v9, v8, 3
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_40:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 40
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_40:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 5
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 3
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_48(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_48:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 6
; CHECK-NEXT: vslideup.vi v9, v8, 2
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_48:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 48
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_48:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 6
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 2
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 6, i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5>
  ret <8 x i8> %shuffle
}

define <8 x i8> @shuffle_v8i8_as_i64_56(<8 x i8> %v) {
; CHECK-LABEL: shuffle_v8i8_as_i64_56:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v9, v8, 7
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i8_as_i64_56:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 56
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i8_as_i64_56:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e8, m2, ta, ma
; ZVKB-ZVE32X-NEXT: vslidedown.vi v10, v8, 7
; ZVKB-ZVE32X-NEXT: vslideup.vi v10, v8, 1
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v10
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i8> %v, <8 x i8> poison, <8 x i32> <i32 7, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
  ret <8 x i8> %shuffle
}
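
; The <8 x i16> and <8 x i32> shuffles below rotate i32/i64 element groups.
; Without Zvkb the i64-element rotates need shift/or sequences (with a splat
; of the shift amount on RV32); zve32x has no 64-bit elements, so those cases
; are lowered as a vrgather through a constant-pool index vector.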

define <8 x i16> @shuffle_v8i16_as_i32(<8 x i16> %v) {
; CHECK-LABEL: shuffle_v8i16_as_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vi v9, v8, 16
; CHECK-NEXT: vsll.vi v8, v8, 16
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i16_as_i32:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 16
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i16_as_i32:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 4, e32, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v8, v8, 16
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i16> %shuffle
}

define <8 x i16> @shuffle_v8i16_as_i64_16(<8 x i16> %v) {
; RV32-LABEL: shuffle_v8i16_as_i64_16:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 48
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v10, v9, a0
; RV32-NEXT: vsll.vv v10, v8, v10
; RV32-NEXT: vrsub.vi v9, v9, 0
; RV32-NEXT: vand.vx v9, v9, a0
; RV32-NEXT: vsrl.vv v8, v8, v9
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8i16_as_i64_16:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 48
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsll.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 16
; RV64-NEXT: vor.vv v8, v9, v8
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i16_as_i64_16:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 16
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i16_as_i64_16:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI19_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI19_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v12, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v16, v12
; ZVKB-ZVE32X-NEXT: vrgather.vv v12, v8, v16
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v12
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
  ret <8 x i16> %shuffle
}

define <8 x i16> @shuffle_v8i16_as_i64_32(<8 x i16> %v) {
; RV32-LABEL: shuffle_v8i16_as_i64_32:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v10, v9, a0
; RV32-NEXT: vsll.vv v10, v8, v10
; RV32-NEXT: vrsub.vi v9, v9, 0
; RV32-NEXT: vand.vx v9, v9, a0
; RV32-NEXT: vsrl.vv v8, v8, v9
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8i16_as_i64_32:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 32
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsrl.vx v9, v8, a0
; RV64-NEXT: vsll.vx v8, v8, a0
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i16_as_i64_32:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 32
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i16_as_i64_32:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI20_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI20_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v12, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v16, v12
; ZVKB-ZVE32X-NEXT: vrgather.vv v12, v8, v16
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v12
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
  ret <8 x i16> %shuffle
}

define <8 x i16> @shuffle_v8i16_as_i64_48(<8 x i16> %v) {
; RV32-LABEL: shuffle_v8i16_as_i64_48:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 16
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v10, v9, a0
; RV32-NEXT: vsll.vv v10, v8, v10
; RV32-NEXT: vrsub.vi v9, v9, 0
; RV32-NEXT: vand.vx v9, v9, a0
; RV32-NEXT: vsrl.vv v8, v8, v9
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8i16_as_i64_48:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 48
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsrl.vx v9, v8, a0
; RV64-NEXT: vsll.vi v8, v8, 16
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i16_as_i64_48:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 48
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i16_as_i64_48:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI21_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI21_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v12, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v16, v12
; ZVKB-ZVE32X-NEXT: vrgather.vv v12, v8, v16
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v12
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i16> %v, <8 x i16> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
  ret <8 x i16> %shuffle
}

define <8 x i32> @shuffle_v8i32_as_i64(<8 x i32> %v) {
; RV32-LABEL: shuffle_v8i32_as_i64:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vmv.v.x v10, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v12, v10, a0
; RV32-NEXT: vsll.vv v12, v8, v12
; RV32-NEXT: vrsub.vi v10, v10, 0
; RV32-NEXT: vand.vx v10, v10, a0
; RV32-NEXT: vsrl.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8i32_as_i64:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 32
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vsrl.vx v10, v8, a0
; RV64-NEXT: vsll.vx v8, v8, a0
; RV64-NEXT: vor.vv v8, v8, v10
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8i32_as_i64:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 32
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8i32_as_i64:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI22_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI22_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v16, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v24, v16
; ZVKB-ZVE32X-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVKB-ZVE32X-NEXT: vrgatherei16.vv v16, v8, v24
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v16
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x i32> %v, <8 x i32> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x i32> %shuffle
}
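
; The floating-point shuffles below are the same rotate patterns applied to
; <8 x half> and <8 x float>, and use the same integer rotate lowering.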

define <8 x half> @shuffle_v8f16_as_i32(<8 x half> %v) {
; CHECK-LABEL: shuffle_v8f16_as_i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vi v9, v8, 16
; CHECK-NEXT: vsll.vi v8, v8, 16
; CHECK-NEXT: vor.vv v8, v8, v9
; CHECK-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8f16_as_i32:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 16
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8f16_as_i32:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: vsetivli zero, 4, e32, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vror.vi v8, v8, 16
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x half> %shuffle
}

define <8 x half> @shuffle_v8f16_as_i64_16(<8 x half> %v) {
; RV32-LABEL: shuffle_v8f16_as_i64_16:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 48
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v10, v9, a0
; RV32-NEXT: vsll.vv v10, v8, v10
; RV32-NEXT: vrsub.vi v9, v9, 0
; RV32-NEXT: vand.vx v9, v9, a0
; RV32-NEXT: vsrl.vv v8, v8, v9
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8f16_as_i64_16:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 48
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsll.vx v9, v8, a0
; RV64-NEXT: vsrl.vi v8, v8, 16
; RV64-NEXT: vor.vv v8, v9, v8
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8f16_as_i64_16:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 16
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8f16_as_i64_16:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI24_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI24_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v12, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v16, v12
; ZVKB-ZVE32X-NEXT: vrgather.vv v12, v8, v16
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v12
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
  ret <8 x half> %shuffle
}

define <8 x half> @shuffle_v8f16_as_i64_32(<8 x half> %v) {
; RV32-LABEL: shuffle_v8f16_as_i64_32:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v10, v9, a0
; RV32-NEXT: vsll.vv v10, v8, v10
; RV32-NEXT: vrsub.vi v9, v9, 0
; RV32-NEXT: vand.vx v9, v9, a0
; RV32-NEXT: vsrl.vv v8, v8, v9
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8f16_as_i64_32:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 32
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsrl.vx v9, v8, a0
; RV64-NEXT: vsll.vx v8, v8, a0
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8f16_as_i64_32:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 32
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8f16_as_i64_32:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI25_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI25_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v12, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v16, v12
; ZVKB-ZVE32X-NEXT: vrgather.vv v12, v8, v16
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v12
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
  ret <8 x half> %shuffle
}

define <8 x half> @shuffle_v8f16_as_i64_48(<8 x half> %v) {
; RV32-LABEL: shuffle_v8f16_as_i64_48:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 16
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vmv.v.x v9, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v10, v9, a0
; RV32-NEXT: vsll.vv v10, v8, v10
; RV32-NEXT: vrsub.vi v9, v9, 0
; RV32-NEXT: vand.vx v9, v9, a0
; RV32-NEXT: vsrl.vv v8, v8, v9
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8f16_as_i64_48:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 48
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsrl.vx v9, v8, a0
; RV64-NEXT: vsll.vi v8, v8, 16
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8f16_as_i64_48:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 48
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8f16_as_i64_48:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI26_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI26_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v12, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v16, v12
; ZVKB-ZVE32X-NEXT: vrgather.vv v12, v8, v16
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v12
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x half> %v, <8 x half> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
  ret <8 x half> %shuffle
}

define <8 x float> @shuffle_v8f32_as_i64(<8 x float> %v) {
; RV32-LABEL: shuffle_v8f32_as_i64:
; RV32: # %bb.0:
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vmv.v.x v10, a0
; RV32-NEXT: li a0, 63
; RV32-NEXT: vand.vx v12, v10, a0
; RV32-NEXT: vsll.vv v12, v8, v12
; RV32-NEXT: vrsub.vi v10, v10, 0
; RV32-NEXT: vand.vx v10, v10, a0
; RV32-NEXT: vsrl.vv v8, v8, v10
; RV32-NEXT: vor.vv v8, v12, v8
; RV32-NEXT: ret
;
; RV64-LABEL: shuffle_v8f32_as_i64:
; RV64: # %bb.0:
; RV64-NEXT: li a0, 32
; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vsrl.vx v10, v8, a0
; RV64-NEXT: vsll.vx v8, v8, a0
; RV64-NEXT: vor.vv v8, v8, v10
; RV64-NEXT: ret
;
; ZVKB-V-LABEL: shuffle_v8f32_as_i64:
; ZVKB-V: # %bb.0:
; ZVKB-V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; ZVKB-V-NEXT: vror.vi v8, v8, 32
; ZVKB-V-NEXT: ret
;
; ZVKB-ZVE32X-LABEL: shuffle_v8f32_as_i64:
; ZVKB-ZVE32X: # %bb.0:
; ZVKB-ZVE32X-NEXT: lui a0, %hi(.LCPI27_0)
; ZVKB-ZVE32X-NEXT: addi a0, a0, %lo(.LCPI27_0)
; ZVKB-ZVE32X-NEXT: vsetivli zero, 8, e16, m4, ta, ma
; ZVKB-ZVE32X-NEXT: vle8.v v16, (a0)
; ZVKB-ZVE32X-NEXT: vsext.vf2 v24, v16
; ZVKB-ZVE32X-NEXT: vsetvli zero, zero, e32, m8, ta, ma
; ZVKB-ZVE32X-NEXT: vrgatherei16.vv v16, v8, v24
; ZVKB-ZVE32X-NEXT: vmv.v.v v8, v16
; ZVKB-ZVE32X-NEXT: ret
  %shuffle = shufflevector <8 x float> %v, <8 x float> poison, <8 x i32> <i32 1, i32 0, i32 3, i32 2, i32 5, i32 4, i32 7, i32 6>
  ret <8 x float> %shuffle
}