; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+zvfh,+v -target-abi ilp32d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,OPTIMIZED
; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+zvfh,+v -target-abi lp64d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,OPTIMIZED
; RUN: llc -mtriple=riscv32 -mattr=+f,+d,+zfh,+zvfh,+v,+no-optimized-zero-stride-load -target-abi ilp32d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,NOT-OPTIMIZED
; RUN: llc -mtriple=riscv64 -mattr=+f,+d,+zfh,+zvfh,+v,+no-optimized-zero-stride-load -target-abi lp64d -verify-machineinstrs < %s \
; RUN:   | FileCheck %s --check-prefixes=CHECK,NOT-OPTIMIZED

define <vscale x 8 x half> @vsplat_nxv8f16(half %f) {
; CHECK-LABEL: vsplat_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x half> poison, half %f, i32 0
  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x half> %splat
}

define <vscale x 8 x half> @vsplat_zero_nxv8f16() {
; CHECK-LABEL: vsplat_zero_nxv8f16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x half> poison, half zeroinitializer, i32 0
  %splat = shufflevector <vscale x 8 x half> %head, <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x half> %splat
}

define <vscale x 8 x float> @vsplat_nxv8f32(float %f) {
; CHECK-LABEL: vsplat_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x float> poison, float %f, i32 0
  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x float> %splat
}

define <vscale x 8 x float> @vsplat_zero_nxv8f32() {
; CHECK-LABEL: vsplat_zero_nxv8f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x float> poison, float zeroinitializer, i32 0
  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x float> %splat
}

define <vscale x 8 x double> @vsplat_nxv8f64(double %f) {
; CHECK-LABEL: vsplat_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vfmv.v.f v8, fa0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x double> poison, double %f, i32 0
  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x double> %splat
}

define <vscale x 8 x double> @vsplat_zero_nxv8f64() {
; CHECK-LABEL: vsplat_zero_nxv8f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, ma
; CHECK-NEXT:    vmv.v.i v8, 0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x double> poison, double zeroinitializer, i32 0
  %splat = shufflevector <vscale x 8 x double> %head, <vscale x 8 x double> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x double> %splat
}

; Test that we fold this to a vlse with 0 stride.
define <vscale x 8 x float> @vsplat_load_nxv8f32(ptr %ptr) {
; OPTIMIZED-LABEL: vsplat_load_nxv8f32:
; OPTIMIZED:       # %bb.0:
; OPTIMIZED-NEXT:    vsetvli a1, zero, e32, m4, ta, ma
; OPTIMIZED-NEXT:    vlse32.v v8, (a0), zero
; OPTIMIZED-NEXT:    ret
;
; NOT-OPTIMIZED-LABEL: vsplat_load_nxv8f32:
; NOT-OPTIMIZED:       # %bb.0:
; NOT-OPTIMIZED-NEXT:    flw fa5, 0(a0)
; NOT-OPTIMIZED-NEXT:    vsetvli a0, zero, e32, m4, ta, ma
; NOT-OPTIMIZED-NEXT:    vfmv.v.f v8, fa5
; NOT-OPTIMIZED-NEXT:    ret
  %f = load float, ptr %ptr
  %head = insertelement <vscale x 8 x float> poison, float %f, i32 0
  %splat = shufflevector <vscale x 8 x float> %head, <vscale x 8 x float> poison, <vscale x 8 x i32> zeroinitializer
  ret <vscale x 8 x float> %splat
}