; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+m,+f,+d,+v -verify-machineinstrs < %s | FileCheck %s

; Test lowering of @llvm.experimental.vp.reverse for scalable floating-point
; vectors: the reverse is expanded to vid.v + vrsub.vx (evl-1 - i) + vrgather.vv.

define <vscale x 1 x double> @test_vp_reverse_nxv1f64_masked(<vscale x 1 x double> %src, <vscale x 1 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1f64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <vscale x 1 x double> @llvm.experimental.vp.reverse.nxv1f64(<vscale x 1 x double> %src, <vscale x 1 x i1> %mask, i32 %evl)
  ret <vscale x 1 x double> %dst
}

define <vscale x 1 x double> @test_vp_reverse_nxv1f64(<vscale x 1 x double> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv1f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> undef, <vscale x 1 x i32> zeroinitializer

  %dst = call <vscale x 1 x double> @llvm.experimental.vp.reverse.nxv1f64(<vscale x 1 x double> %src, <vscale x 1 x i1> %allones, i32 %evl)
  ret <vscale x 1 x double> %dst
}

define <vscale x 2 x float> @test_vp_reverse_nxv2f32_masked(<vscale x 2 x float> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2f32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v10, v9, a0, v0.t
; CHECK-NEXT:    vrgather.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %dst = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %src, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x float> %dst
}

define <vscale x 2 x float> @test_vp_reverse_nxv2f32(<vscale x 2 x float> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vid.v v9
; CHECK-NEXT:    vrsub.vx v10, v9, a1
; CHECK-NEXT:    vrgather.vv v9, v8, v10
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer

  %dst = call <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float> %src, <vscale x 2 x i1> %allones, i32 %evl)
  ret <vscale x 2 x float> %dst
}

define <vscale x 2 x double> @test_vp_reverse_nxv2f64_masked(<vscale x 2 x double> %src, <vscale x 2 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2f64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v12, v10, a0, v0.t
; CHECK-NEXT:    vrgather.vv v10, v8, v12, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %dst = call <vscale x 2 x double> @llvm.experimental.vp.reverse.nxv2f64(<vscale x 2 x double> %src, <vscale x 2 x i1> %mask, i32 %evl)
  ret <vscale x 2 x double> %dst
}

define <vscale x 2 x double> @test_vp_reverse_nxv2f64(<vscale x 2 x double> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv2f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a1
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 2 x i1> %head, <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer

  %dst = call <vscale x 2 x double> @llvm.experimental.vp.reverse.nxv2f64(<vscale x 2 x double> %src, <vscale x 2 x i1> %allones, i32 %evl)
  ret <vscale x 2 x double> %dst
}

define <vscale x 4 x float> @test_vp_reverse_nxv4f32_masked(<vscale x 4 x float> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4f32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vid.v v10, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v12, v10, a0, v0.t
; CHECK-NEXT:    vrgather.vv v10, v8, v12, v0.t
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %dst = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> %src, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x float> %dst
}

define <vscale x 4 x float> @test_vp_reverse_nxv4f32(<vscale x 4 x float> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vid.v v10
; CHECK-NEXT:    vrsub.vx v12, v10, a1
; CHECK-NEXT:    vrgather.vv v10, v8, v12
; CHECK-NEXT:    vmv.v.v v8, v10
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer

  %dst = call <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float> %src, <vscale x 4 x i1> %allones, i32 %evl)
  ret <vscale x 4 x float> %dst
}

define <vscale x 4 x double> @test_vp_reverse_nxv4f64_masked(<vscale x 4 x double> %src, <vscale x 4 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4f64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vid.v v12, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v16, v12, a0, v0.t
; CHECK-NEXT:    vrgather.vv v12, v8, v16, v0.t
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %dst = call <vscale x 4 x double> @llvm.experimental.vp.reverse.nxv4f64(<vscale x 4 x double> %src, <vscale x 4 x i1> %mask, i32 %evl)
  ret <vscale x 4 x double> %dst
}

define <vscale x 4 x double> @test_vp_reverse_nxv4f64(<vscale x 4 x double> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv4f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a1
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 4 x i1> %head, <vscale x 4 x i1> undef, <vscale x 4 x i32> zeroinitializer

  %dst = call <vscale x 4 x double> @llvm.experimental.vp.reverse.nxv4f64(<vscale x 4 x double> %src, <vscale x 4 x i1> %allones, i32 %evl)
  ret <vscale x 4 x double> %dst
}

define <vscale x 8 x float> @test_vp_reverse_nxv8f32_masked(<vscale x 8 x float> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8f32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vid.v v12, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v16, v12, a0, v0.t
; CHECK-NEXT:    vrgather.vv v12, v8, v16, v0.t
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %dst = call <vscale x 8 x float> @llvm.experimental.vp.reverse.nxv8f32(<vscale x 8 x float> %src, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x float> %dst
}

define <vscale x 8 x float> @test_vp_reverse_nxv8f32(<vscale x 8 x float> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vid.v v12
; CHECK-NEXT:    vrsub.vx v16, v12, a1
; CHECK-NEXT:    vrgather.vv v12, v8, v16
; CHECK-NEXT:    vmv.v.v v8, v12
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer

  %dst = call <vscale x 8 x float> @llvm.experimental.vp.reverse.nxv8f32(<vscale x 8 x float> %src, <vscale x 8 x i1> %allones, i32 %evl)
  ret <vscale x 8 x float> %dst
}

define <vscale x 8 x double> @test_vp_reverse_nxv8f64_masked(<vscale x 8 x double> %src, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8f64_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vid.v v16, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v24, v16, a0, v0.t
; CHECK-NEXT:    vrgather.vv v16, v8, v24, v0.t
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %dst = call <vscale x 8 x double> @llvm.experimental.vp.reverse.nxv8f64(<vscale x 8 x double> %src, <vscale x 8 x i1> %mask, i32 %evl)
  ret <vscale x 8 x double> %dst
}

define <vscale x 8 x double> @test_vp_reverse_nxv8f64(<vscale x 8 x double> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a1
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 8 x i1> %head, <vscale x 8 x i1> undef, <vscale x 8 x i32> zeroinitializer

  %dst = call <vscale x 8 x double> @llvm.experimental.vp.reverse.nxv8f64(<vscale x 8 x double> %src, <vscale x 8 x i1> %allones, i32 %evl)
  ret <vscale x 8 x double> %dst
}

define <vscale x 16 x float> @test_vp_reverse_nxv16f32_masked(<vscale x 16 x float> %src, <vscale x 16 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16f32_masked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vid.v v16, v0.t
; CHECK-NEXT:    addi a0, a0, -1
; CHECK-NEXT:    vrsub.vx v24, v16, a0, v0.t
; CHECK-NEXT:    vrgather.vv v16, v8, v24, v0.t
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %dst = call <vscale x 16 x float> @llvm.experimental.vp.reverse.nxv16f32(<vscale x 16 x float> %src, <vscale x 16 x i1> %mask, i32 %evl)
  ret <vscale x 16 x float> %dst
}

define <vscale x 16 x float> @test_vp_reverse_nxv16f32(<vscale x 16 x float> %src, i32 zeroext %evl) {
; CHECK-LABEL: test_vp_reverse_nxv16f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a1, a0, -1
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT:    vid.v v16
; CHECK-NEXT:    vrsub.vx v24, v16, a1
; CHECK-NEXT:    vrgather.vv v16, v8, v24
; CHECK-NEXT:    vmv.v.v v8, v16
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i1> undef, i1 1, i32 0
  %allones = shufflevector <vscale x 16 x i1> %head, <vscale x 16 x i1> undef, <vscale x 16 x i32> zeroinitializer

  %dst = call <vscale x 16 x float> @llvm.experimental.vp.reverse.nxv16f32(<vscale x 16 x float> %src, <vscale x 16 x i1> %allones, i32 %evl)
  ret <vscale x 16 x float> %dst
}

; LMUL = 1
declare <vscale x 1 x double> @llvm.experimental.vp.reverse.nxv1f64(<vscale x 1 x double>,<vscale x 1 x i1>,i32)
declare <vscale x 2 x float> @llvm.experimental.vp.reverse.nxv2f32(<vscale x 2 x float>,<vscale x 2 x i1>,i32)

; LMUL = 2
declare <vscale x 2 x double> @llvm.experimental.vp.reverse.nxv2f64(<vscale x 2 x double>,<vscale x 2 x i1>,i32)
declare <vscale x 4 x float> @llvm.experimental.vp.reverse.nxv4f32(<vscale x 4 x float>,<vscale x 4 x i1>,i32)

; LMUL = 4
declare <vscale x 4 x double> @llvm.experimental.vp.reverse.nxv4f64(<vscale x 4 x double>,<vscale x 4 x i1>,i32)
declare <vscale x 8 x float> @llvm.experimental.vp.reverse.nxv8f32(<vscale x 8 x float>,<vscale x 8 x i1>,i32)

; LMUL = 8
declare <vscale x 8 x double> @llvm.experimental.vp.reverse.nxv8f64(<vscale x 8 x double>,<vscale x 8 x i1>,i32)
declare <vscale x 16 x float> @llvm.experimental.vp.reverse.nxv16f32(<vscale x 16 x float>,<vscale x 16 x i1>,i32)