; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+f,+d \
; RUN:   -target-abi=ilp32d -verify-machineinstrs | FileCheck %s --check-prefix=RV32
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
; RUN:   -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+f,+d \
; RUN:   -target-abi=lp64d -verify-machineinstrs | FileCheck %s --check-prefix=RV64-i64

define <1 x iXLen> @lrint_v1f32(<1 x float> %x) {
; RV32-LABEL: lrint_v1f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vmv.s.x v8, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v1f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vmv.s.x v8, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v1f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i64-NEXT:    vmv.s.x v8, a0
; RV64-i64-NEXT:    ret
  %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float> %x)
  ret <1 x iXLen> %a
}
declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f32(<1 x float>)

define <2 x iXLen> @lrint_v2f32(<2 x float> %x) {
; RV32-LABEL: lrint_v2f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v9
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a1, fa5
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v2f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v9
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-i32-NEXT:    vmv.v.x v8, a1
; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v2f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a1, fa5
; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-i64-NEXT:    vmv.v.x v8, a1
; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-i64-NEXT:    ret
  %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float> %x)
  ret <2 x iXLen> %a
}
declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f32(<2 x float>)

define <3 x iXLen> @lrint_v3f32(<3 x float> %x) {
; RV32-LABEL: lrint_v3f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v9
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a1, fa5
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vslide1down.vx v9, v9, a0
; RV32-NEXT:    vslidedown.vi v10, v8, 2
; RV32-NEXT:    vfmv.f.s fa5, v10
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vslide1down.vx v9, v9, a0
; RV32-NEXT:    vslidedown.vi v8, v8, 3
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vslide1down.vx v8, v9, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v3f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v9
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT:    vmv.v.x v9, a1
; RV64-i32-NEXT:    vslide1down.vx v9, v9, a0
; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
; RV64-i32-NEXT:    vfmv.f.s fa5, v10
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vslide1down.vx v9, v9, a0
; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vslide1down.vx v8, v9, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v3f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a1, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vmv.v.x v10, a1
; RV64-i64-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 2
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vslide1down.vx v8, v10, a0
; RV64-i64-NEXT:    ret
  %a = call <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float> %x)
  ret <3 x iXLen> %a
}
declare <3 x iXLen> @llvm.lrint.v3iXLen.v3f32(<3 x float>)

define <4 x iXLen> @lrint_v4f32(<4 x float> %x) {
; RV32-LABEL: lrint_v4f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v9
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a1, fa5
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vslide1down.vx v9, v9, a0
; RV32-NEXT:    vslidedown.vi v10, v8, 2
; RV32-NEXT:    vfmv.f.s fa5, v10
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vslide1down.vx v9, v9, a0
; RV32-NEXT:    vslidedown.vi v8, v8, 3
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vslide1down.vx v8, v9, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v4f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v9
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT:    vmv.v.x v9, a1
; RV64-i32-NEXT:    vslide1down.vx v9, v9, a0
; RV64-i32-NEXT:    vslidedown.vi v10, v8, 2
; RV64-i32-NEXT:    vfmv.f.s fa5, v10
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vslide1down.vx v9, v9, a0
; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vslide1down.vx v8, v9, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v4f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a1, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vmv.v.x v10, a1
; RV64-i64-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 2
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vslide1down.vx v8, v10, a0
; RV64-i64-NEXT:    ret
  %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float> %x)
  ret <4 x iXLen> %a
}
declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f32(<4 x float>)

define <8 x iXLen> @lrint_v8f32(<8 x float> %x) {
; RV32-LABEL: lrint_v8f32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v10, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v10
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a1, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 2
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 3
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 4
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 5
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 6
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v8, v8, 7
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.s a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v10, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v8f32:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v10
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vmv.v.x v10, a1
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 2
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 3
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 4
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 5
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 6
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v8, v8, 7
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.s a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v8, v10, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v8f32:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    addi sp, sp, -128
; RV64-i64-NEXT:    .cfi_def_cfa_offset 128
; RV64-i64-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; RV64-i64-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; RV64-i64-NEXT:    .cfi_offset ra, -8
; RV64-i64-NEXT:    .cfi_offset s0, -16
; RV64-i64-NEXT:    addi s0, sp, 128
; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
; RV64-i64-NEXT:    andi sp, sp, -64
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m2, ta, ma
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 0(sp)
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 7
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 56(sp)
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 6
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 48(sp)
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 5
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 40(sp)
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 4
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 32(sp)
; RV64-i64-NEXT:    vsetivli zero, 1, e32, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 3
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 24(sp)
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 2
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 16(sp)
; RV64-i64-NEXT:    vslidedown.vi v8, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.s a0, fa5
; RV64-i64-NEXT:    sd a0, 8(sp)
; RV64-i64-NEXT:    mv a0, sp
; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-i64-NEXT:    vle64.v v8, (a0)
; RV64-i64-NEXT:    addi sp, s0, -128
; RV64-i64-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; RV64-i64-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; RV64-i64-NEXT:    addi sp, sp, 128
; RV64-i64-NEXT:    ret
  %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float> %x)
  ret <8 x iXLen> %a
}
declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f32(<8 x float>)

define <16 x iXLen> @lrint_v16iXLen_v16f32(<16 x float> %x) {
  %a = call <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float> %x)
  ret <16 x iXLen> %a
}
declare <16 x iXLen> @llvm.lrint.v16iXLen.v16f32(<16 x float>)

define <1 x iXLen> @lrint_v1f64(<1 x double> %x) {
; RV32-LABEL: lrint_v1f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV32-NEXT:    vmv.s.x v8, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v1f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
; RV64-i32-NEXT:    vmv.s.x v8, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v1f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    vmv.s.x v8, a0
; RV64-i64-NEXT:    ret
  %a = call <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double> %x)
  ret <1 x iXLen> %a
}
declare <1 x iXLen> @llvm.lrint.v1iXLen.v1f64(<1 x double>)

define <2 x iXLen> @lrint_v2f64(<2 x double> %x) {
; RV32-LABEL: lrint_v2f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v9, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v9
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.d a1, fa5
; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT:    vmv.v.x v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v2f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v9
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.d a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
; RV64-i32-NEXT:    vmv.v.x v8, a1
; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v2f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v9, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v9
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.d a1, fa5
; RV64-i64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
; RV64-i64-NEXT:    vmv.v.x v8, a1
; RV64-i64-NEXT:    vslide1down.vx v8, v8, a0
; RV64-i64-NEXT:    ret
  %a = call <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double> %x)
  ret <2 x iXLen> %a
}
declare <2 x iXLen> @llvm.lrint.v2iXLen.v2f64(<2 x double>)

define <4 x iXLen> @lrint_v4f64(<4 x double> %x) {
; RV32-LABEL: lrint_v4f64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v10, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v10
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.d a1, fa5
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 2
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v8, v8, 3
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT:    vslide1down.vx v8, v10, a0
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v4f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v10
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.d a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT:    vmv.v.x v10, a1
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 2
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v8, v10, a0
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v4f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.d a1, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vmv.v.x v10, a1
; RV64-i64-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v12, v8, 2
; RV64-i64-NEXT:    vfmv.f.s fa5, v12
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
; RV64-i64-NEXT:    vslide1down.vx v8, v10, a0
; RV64-i64-NEXT:    ret
  %a = call <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double> %x)
  ret <4 x iXLen> %a
}
declare <4 x iXLen> @llvm.lrint.v4iXLen.v4f64(<4 x double>)

define <8 x iXLen> @lrint_v8f64(<8 x double> %x) {
; RV32-LABEL: lrint_v8f64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -128
; RV32-NEXT:    .cfi_def_cfa_offset 128
; RV32-NEXT:    sw ra, 124(sp) # 4-byte Folded Spill
; RV32-NEXT:    sw s0, 120(sp) # 4-byte Folded Spill
; RV32-NEXT:    .cfi_offset ra, -4
; RV32-NEXT:    .cfi_offset s0, -8
; RV32-NEXT:    addi s0, sp, 128
; RV32-NEXT:    .cfi_def_cfa s0, 0
; RV32-NEXT:    andi sp, sp, -64
; RV32-NEXT:    mv a0, sp
; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT:    vse64.v v8, (a0)
; RV32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT:    vslidedown.vi v10, v8, 1
; RV32-NEXT:    vfmv.f.s fa5, v10
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    fcvt.w.d a1, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v12, v8, 2
; RV32-NEXT:    vfmv.f.s fa5, v12
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    vslide1down.vx v10, v10, a0
; RV32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV32-NEXT:    vslidedown.vi v8, v8, 3
; RV32-NEXT:    vfmv.f.s fa5, v8
; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT:    fld fa4, 32(sp)
; RV32-NEXT:    fld fa3, 40(sp)
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    fld fa5, 48(sp)
; RV32-NEXT:    fcvt.w.d a1, fa4
; RV32-NEXT:    fcvt.w.d a2, fa3
; RV32-NEXT:    vslide1down.vx v8, v10, a0
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    fld fa5, 56(sp)
; RV32-NEXT:    vslide1down.vx v8, v8, a1
; RV32-NEXT:    vslide1down.vx v8, v8, a2
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    fcvt.w.d a0, fa5
; RV32-NEXT:    vslide1down.vx v8, v8, a0
; RV32-NEXT:    addi sp, s0, -128
; RV32-NEXT:    lw ra, 124(sp) # 4-byte Folded Reload
; RV32-NEXT:    lw s0, 120(sp) # 4-byte Folded Reload
; RV32-NEXT:    addi sp, sp, 128
; RV32-NEXT:    ret
;
; RV64-i32-LABEL: lrint_v8f64:
; RV64-i32:       # %bb.0:
; RV64-i32-NEXT:    addi sp, sp, -128
; RV64-i32-NEXT:    .cfi_def_cfa_offset 128
; RV64-i32-NEXT:    sd ra, 120(sp) # 8-byte Folded Spill
; RV64-i32-NEXT:    sd s0, 112(sp) # 8-byte Folded Spill
; RV64-i32-NEXT:    .cfi_offset ra, -8
; RV64-i32-NEXT:    .cfi_offset s0, -16
; RV64-i32-NEXT:    addi s0, sp, 128
; RV64-i32-NEXT:    .cfi_def_cfa s0, 0
; RV64-i32-NEXT:    andi sp, sp, -64
; RV64-i32-NEXT:    mv a0, sp
; RV64-i32-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-i32-NEXT:    vse64.v v8, (a0)
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v10, v8, 1
; RV64-i32-NEXT:    vfmv.f.s fa5, v10
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    fcvt.l.d a1, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vmv.v.x v10, a1
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v12, v8, 2
; RV64-i32-NEXT:    vfmv.f.s fa5, v12
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    vslide1down.vx v10, v10, a0
; RV64-i32-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i32-NEXT:    vslidedown.vi v8, v8, 3
; RV64-i32-NEXT:    vfmv.f.s fa5, v8
; RV64-i32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
; RV64-i32-NEXT:    fld fa4, 32(sp)
; RV64-i32-NEXT:    fld fa3, 40(sp)
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    fld fa5, 48(sp)
; RV64-i32-NEXT:    fcvt.l.d a1, fa4
; RV64-i32-NEXT:    fcvt.l.d a2, fa3
; RV64-i32-NEXT:    vslide1down.vx v8, v10, a0
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    fld fa5, 56(sp)
; RV64-i32-NEXT:    vslide1down.vx v8, v8, a1
; RV64-i32-NEXT:    vslide1down.vx v8, v8, a2
; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
; RV64-i32-NEXT:    fcvt.l.d a0, fa5
; RV64-i32-NEXT:    vslide1down.vx v8, v8, a0
; RV64-i32-NEXT:    addi sp, s0, -128
; RV64-i32-NEXT:    ld ra, 120(sp) # 8-byte Folded Reload
; RV64-i32-NEXT:    ld s0, 112(sp) # 8-byte Folded Reload
; RV64-i32-NEXT:    addi sp, sp, 128
; RV64-i32-NEXT:    ret
;
; RV64-i64-LABEL: lrint_v8f64:
; RV64-i64:       # %bb.0:
; RV64-i64-NEXT:    addi sp, sp, -192
; RV64-i64-NEXT:    .cfi_def_cfa_offset 192
; RV64-i64-NEXT:    sd ra, 184(sp) # 8-byte Folded Spill
; RV64-i64-NEXT:    sd s0, 176(sp) # 8-byte Folded Spill
; RV64-i64-NEXT:    .cfi_offset ra, -8
; RV64-i64-NEXT:    .cfi_offset s0, -16
; RV64-i64-NEXT:    addi s0, sp, 192
; RV64-i64-NEXT:    .cfi_def_cfa s0, 0
; RV64-i64-NEXT:    andi sp, sp, -64
; RV64-i64-NEXT:    mv a0, sp
; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-i64-NEXT:    vse64.v v8, (a0)
; RV64-i64-NEXT:    fld fa5, 56(sp)
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 120(sp)
; RV64-i64-NEXT:    fld fa5, 48(sp)
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 112(sp)
; RV64-i64-NEXT:    fld fa5, 40(sp)
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 104(sp)
; RV64-i64-NEXT:    fld fa5, 32(sp)
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 96(sp)
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 64(sp)
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 1
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 72(sp)
; RV64-i64-NEXT:    vsetivli zero, 1, e64, m2, ta, ma
; RV64-i64-NEXT:    vslidedown.vi v10, v8, 3
; RV64-i64-NEXT:    vfmv.f.s fa5, v10
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 88(sp)
; RV64-i64-NEXT:    vslidedown.vi v8, v8, 2
; RV64-i64-NEXT:    vfmv.f.s fa5, v8
; RV64-i64-NEXT:    fcvt.l.d a0, fa5
; RV64-i64-NEXT:    sd a0, 80(sp)
; RV64-i64-NEXT:    addi a0, sp, 64
; RV64-i64-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
; RV64-i64-NEXT:    vle64.v v8, (a0)
; RV64-i64-NEXT:    addi sp, s0, -192
; RV64-i64-NEXT:    ld ra, 184(sp) # 8-byte Folded Reload
; RV64-i64-NEXT:    ld s0, 176(sp) # 8-byte Folded Reload
; RV64-i64-NEXT:    addi sp, sp, 192
; RV64-i64-NEXT:    ret
  %a = call <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double> %x)
  ret <8 x iXLen> %a
}
declare <8 x iXLen> @llvm.lrint.v8iXLen.v8f64(<8 x double>)