; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s

declare <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, i32, <vscale x 1 x i1>, i32, i32)
declare <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, i32, <vscale x 2 x i1>, i32, i32)
declare <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, i32, <vscale x 4 x i1>, i32, i32)
declare <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, i32, <vscale x 8 x i1>, i32, i32)
declare <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i32, <vscale x 16 x i1>, i32, i32)
declare <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, i32, <vscale x 32 x i1>, i32, i32)
declare <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, i32, <vscale x 64 x i1>, i32, i32)

define <vscale x 1 x i1> @test_vp_splice_nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vi v9, v9, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vx v9, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 1 x i1> %v
}

define <vscale x 1 x i1> @test_vp_splice_nxv1i1_negative_offset(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, mf8, ta, ma
; CHECK-NEXT:    vslidedown.vx v9, v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 5
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer
  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 -5, <vscale x 1 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 1 x i1> %v
}

define <vscale x 1 x i1> @test_vp_splice_nxv1i1_masked(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, <vscale x 1 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv1i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v11, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 1 x i1> @llvm.experimental.vp.splice.nxv1i1(<vscale x 1 x i1> %va, <vscale x 1 x i1> %vb, i32 5, <vscale x 1 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 1 x i1> %v
}

define <vscale x 2 x i1> @test_vp_splice_nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vi v9, v9, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vx v9, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @test_vp_splice_nxv2i1_negative_offset(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, mf4, ta, ma
; CHECK-NEXT:    vslidedown.vx v9, v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 5
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer
  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 -5, <vscale x 2 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i1> %v
}

define <vscale x 2 x i1> @test_vp_splice_nxv2i1_masked(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, <vscale x 2 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv2i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v11, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 2 x i1> @llvm.experimental.vp.splice.nxv2i1(<vscale x 2 x i1> %va, <vscale x 2 x i1> %vb, i32 5, <vscale x 2 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 2 x i1> %v
}

define <vscale x 4 x i1> @test_vp_splice_nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vi v9, v9, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vx v9, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 4 x i1> %v
}

define <vscale x 4 x i1> @test_vp_splice_nxv4i1_negative_offset(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, mf2, ta, ma
; CHECK-NEXT:    vslidedown.vx v9, v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 5
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer
  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 -5, <vscale x 4 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 4 x i1> %v
}

define <vscale x 4 x i1> @test_vp_splice_nxv4i1_masked(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, <vscale x 4 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv4i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v11, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 4 x i1> @llvm.experimental.vp.splice.nxv4i1(<vscale x 4 x i1> %va, <vscale x 4 x i1> %vb, i32 5, <vscale x 4 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 4 x i1> %v
}

define <vscale x 8 x i1> @test_vp_splice_nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vi v9, v9, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vx v9, v8, a0
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 8 x i1> %v
}

define <vscale x 8 x i1> @test_vp_splice_nxv8i1_negative_offset(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v9, v10, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, m1, ta, ma
; CHECK-NEXT:    vslidedown.vx v9, v9, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vslideup.vi v9, v8, 5
; CHECK-NEXT:    vmsne.vi v0, v9, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer
  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 -5, <vscale x 8 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 8 x i1> %v
}

define <vscale x 8 x i1> @test_vp_splice_nxv8i1_masked(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, <vscale x 8 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv8i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v8, v11, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv.v.i v11, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v10, v11, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vslideup.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m1, ta, ma
; CHECK-NEXT:    vmsne.vi v0, v10, 0, v0.t
; CHECK-NEXT:    ret
  %v = call <vscale x 8 x i1> @llvm.experimental.vp.splice.nxv8i1(<vscale x 8 x i1> %va, <vscale x 8 x i1> %vb, i32 5, <vscale x 8 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 8 x i1> %v
}

define <vscale x 16 x i1> @test_vp_splice_nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v10, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 16 x i1> %v
}

define <vscale x 16 x i1> @test_vp_splice_nxv16i1_negative_offset(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v10, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v10, v10, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v12, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, m2, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v10, 5
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer
  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 -5, <vscale x 16 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 16 x i1> %v
}

define <vscale x 16 x i1> @test_vp_splice_nxv16i1_masked(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, <vscale x 16 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv16i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v14, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v10, v14, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v10, v10, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vslideup.vx v10, v12, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m2, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v10, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 16 x i1> @llvm.experimental.vp.splice.nxv16i1(<vscale x 16 x i1> %va, <vscale x 16 x i1> %vb, i32 5, <vscale x 16 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 16 x i1> %v
}

define <vscale x 32 x i1> @test_vp_splice_nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v12, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 32 x i1> %v
}

define <vscale x 32 x i1> @test_vp_splice_nxv32i1_negative_offset(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v16, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, m4, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v12, 5
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 32 x i1> %head, <vscale x 32 x i1> undef, <vscale x 32 x i32> zeroinitializer
  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 -5, <vscale x 32 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 32 x i1> %v
}

define <vscale x 32 x i1> @test_vp_splice_nxv32i1_masked(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, <vscale x 32 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv32i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v12, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v12, v12, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v16, v16, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vslideup.vx v16, v12, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m4, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v16, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 32 x i1> @llvm.experimental.vp.splice.nxv32i1(<vscale x 32 x i1> %va, <vscale x 32 x i1> %vb, i32 5, <vscale x 32 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 32 x i1> %v
}

define <vscale x 64 x i1> @test_vp_splice_nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v24, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vslidedown.vi v8, v8, 5
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vslideup.vx v8, v16, a0
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 64 x i1> %v
}

define <vscale x 64 x i1> @test_vp_splice_nxv64i1_negative_offset(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_negative_offset:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v9, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v24, 0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmerge.vim v8, v24, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetivli zero, 5, e8, m8, ta, ma
; CHECK-NEXT:    vslidedown.vx v8, v8, a0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vslideup.vi v8, v16, 5
; CHECK-NEXT:    vmsne.vi v0, v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 64 x i1> %head, <vscale x 64 x i1> undef, <vscale x 64 x i32> zeroinitializer
  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 -5, <vscale x 64 x i1> %allones, i32 %evla, i32 %evlb)
  ret <vscale x 64 x i1> %v
}

define <vscale x 64 x i1> @test_vp_splice_nxv64i1_masked(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, <vscale x 64 x i1> %mask, i32 zeroext %evla, i32 zeroext %evlb) {
; CHECK-LABEL: test_vp_splice_nxv64i1_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vmv1r.v v10, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v16, 0
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    vmerge.vim v16, v16, 1, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v24, 0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmerge.vim v24, v24, 1, v0
; CHECK-NEXT:    addi a0, a0, -5
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vslidedown.vi v24, v24, 5, v0.t
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vslideup.vx v24, v16, a0, v0.t
; CHECK-NEXT:    vsetvli zero, zero, e8, m8, ta, ma
; CHECK-NEXT:    vmsne.vi v8, v24, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v8
; CHECK-NEXT:    ret
  %v = call <vscale x 64 x i1> @llvm.experimental.vp.splice.nxv64i1(<vscale x 64 x i1> %va, <vscale x 64 x i1> %vb, i32 5, <vscale x 64 x i1> %mask, i32 %evla, i32 %evlb)
  ret <vscale x 64 x i1> %v
}