; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | \
; RUN:   FileCheck %s -check-prefix=RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | \
; RUN:   FileCheck %s -check-prefix=RV64

; ================================================================================
; trunc <vscale x 1 x double>
; ================================================================================

declare <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double>)
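
; Hand-written note (our reading of the checks below, not autogenerated):
; there is no direct f64->i8/i16 vector convert, so llvm.trunc is first
; performed in the f64 domain. vfabs.v + vmflt.vf build a mask of lanes whose
; magnitude is below the .LCPI*_0 constant (presumably 2^52, beyond which
; every double is already integral); only those lanes are rounded through
; vfcvt.rtz.x.f.v / vfcvt.f.x.v, and vfsgnj.vv restores the original sign so
; that e.g. -0.0 survives. The rounded value is then narrowed one SEW step at
; a time with vfncvt and vnsrl.wi.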

define <vscale x 1 x i8> @trunc_nxv1f64_to_si8(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI0_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI0_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI0_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI0_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i8> @trunc_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI1_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI1_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i16> @trunc_nxv1f64_to_si16(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI2_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI2_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI2_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i16> @trunc_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI3_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI3_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i32> @trunc_nxv1f64_to_si32(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i32> @trunc_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i64> @trunc_nxv1f64_to_si64(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

define <vscale x 1 x i64> @trunc_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
; RV32-LABEL: trunc_nxv1f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv1f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.trunc.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

; ================================================================================
; trunc <vscale x 4 x double>
; ================================================================================

declare <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double>)
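
; Hand-written note: the <vscale x 4 x double> cases follow the same pattern
; at LMUL=4. The source occupies the m4 group starting at v8, v12 serves as
; scratch, and the narrowing walks down through e32/m2, e16/m1 and e8/mf2.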

define <vscale x 4 x i8> @trunc_nxv4f64_to_si8(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI8_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI8_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI8_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI8_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i8> @trunc_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI9_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI9_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i16> @trunc_nxv4f64_to_si16(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI10_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI10_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI10_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i16> @trunc_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI11_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI11_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.rtz.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i32> @trunc_nxv4f64_to_si32(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i32> @trunc_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i64> @trunc_nxv4f64_to_si64(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.rtz.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

define <vscale x 4 x i64> @trunc_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
; RV32-LABEL: trunc_nxv4f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: trunc_nxv4f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.rtz.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.trunc.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

; ================================================================================
; ceil <vscale x 1 x double>
; ================================================================================

declare <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double>)
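
; Hand-written note: ceil differs from trunc only in the rounding step.
; fsrmi a0, 3 swaps the dynamic rounding mode to 3 (RUP, round towards
; +infinity) around the vfcvt.x.f.v, and fsrm a0 restores the previous mode;
; the masked round-and-restore-sign sequence is otherwise unchanged.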

define <vscale x 1 x i8> @ceil_nxv1f64_to_si8(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI16_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI16_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI16_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i8> @ceil_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI17_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI17_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i16> @ceil_nxv1f64_to_si16(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI18_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI18_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI18_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i16> @ceil_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI19_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI19_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i32> @ceil_nxv1f64_to_si32(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfncvt.x.f.w v9, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfncvt.x.f.w v9, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i32> @ceil_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfncvt.xu.f.w v9, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfncvt.xu.f.w v9, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i64> @ceil_nxv1f64_to_si64(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

define <vscale x 1 x i64> @ceil_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
; RV32-LABEL: ceil_nxv1f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv1f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.ceil.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

; ================================================================================
; ceil <vscale x 4 x double>
; ================================================================================

declare <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double>)
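
; Hand-written note: as with trunc, the i32/i64 results need no f64-domain
; pre-rounding, so the round-then-convert pair collapses into a single
; vfncvt.x.f.w / vfcvt.x.f.v issued while RUP is selected. The m4 variants
; below repeat the masked sequence with v12 as scratch.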

define <vscale x 4 x i8> @ceil_nxv4f64_to_si8(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI24_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI24_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI24_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i8> @ceil_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI25_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI25_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI25_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i16> @ceil_nxv4f64_to_si16(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI26_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI26_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI26_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i16> @ceil_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI27_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI27_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI27_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i32> @ceil_nxv4f64_to_si32(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfncvt.x.f.w v12, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfncvt.x.f.w v12, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i32> @ceil_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfncvt.xu.f.w v12, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfncvt.xu.f.w v12, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i64> @ceil_nxv4f64_to_si64(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

define <vscale x 4 x i64> @ceil_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
; RV32-LABEL: ceil_nxv4f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    fsrmi a0, 3
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    fsrm a0
; RV32-NEXT:    ret
;
; RV64-LABEL: ceil_nxv4f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    fsrmi a0, 3
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    fsrm a0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.ceil.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

; ================================================================================
; rint <vscale x 1 x double>
; ================================================================================

declare <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double>)
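
; Hand-written note: rint rounds according to whatever mode is currently in
; frm, so the lowering is the same masked sequence but with a plain
; vfcvt.x.f.v and no fsrmi/fsrm pair; the i32/i64 cases likewise fold to a
; single vfncvt.x.f.w / vfcvt.x.f.v.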

define <vscale x 1 x i8> @rint_nxv1f64_to_si8(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI32_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI32_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI32_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI32_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i8> @rint_nxv1f64_to_ui8(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI33_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI33_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI33_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI33_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf8, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i8>
  ret <vscale x 1 x i8> %b
}

define <vscale x 1 x i16> @rint_nxv1f64_to_si16(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI34_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI34_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI34_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i16> @rint_nxv1f64_to_ui16(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI35_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfabs.v v9, v8
; RV32-NEXT:    vmflt.vf v0, v9, fa5
; RV32-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v9, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI35_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI35_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfabs.v v9, v8
; RV64-NEXT:    vmflt.vf v0, v9, fa5
; RV64-NEXT:    vfcvt.x.f.v v9, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v9, v9, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m1, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v9, v8
; RV64-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v9, 0
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i16>
  ret <vscale x 1 x i16> %b
}

define <vscale x 1 x i32> @rint_nxv1f64_to_si32(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.x.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i32> @rint_nxv1f64_to_ui32(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV32-NEXT:    vfncvt.xu.f.w v9, v8
; RV32-NEXT:    vmv1r.v v8, v9
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, mf2, ta, ma
; RV64-NEXT:    vfncvt.xu.f.w v9, v8
; RV64-NEXT:    vmv1r.v v8, v9
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i32>
  ret <vscale x 1 x i32> %b
}

define <vscale x 1 x i64> @rint_nxv1f64_to_si64(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptosi <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

define <vscale x 1 x i64> @rint_nxv1f64_to_ui64(<vscale x 1 x double> %x) {
; RV32-LABEL: rint_nxv1f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv1f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m1, ta, ma
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 1 x double> @llvm.rint.nxv1f64(<vscale x 1 x double> %x)
  %b = fptoui <vscale x 1 x double> %a to <vscale x 1 x i64>
  ret <vscale x 1 x i64> %b
}

; ================================================================================
; rint <vscale x 4 x double>
; ================================================================================

declare <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double>)
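
; Hand-written note: in the m1 cases above the final copy is a whole-register
; vmv1r.v, while the m2 results below are copied with vmv.v.v under the
; active vsetvli.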

define <vscale x 4 x i8> @rint_nxv4f64_to_si8(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI40_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI40_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI40_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i8> @rint_nxv4f64_to_ui8(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui8:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI41_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v8, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui8:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI41_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI41_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    vsetvli zero, zero, e8, mf2, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v8, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i8>
  ret <vscale x 4 x i8> %b
}

define <vscale x 4 x i16> @rint_nxv4f64_to_si16(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI42_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI42_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI42_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.x.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i16> @rint_nxv4f64_to_ui16(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui16:
; RV32:       # %bb.0:
; RV32-NEXT:    lui a0, %hi(.LCPI43_0)
; RV32-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfabs.v v12, v8
; RV32-NEXT:    vmflt.vf v0, v12, fa5
; RV32-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV32-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV32-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV32-NEXT:    vnsrl.wi v8, v12, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui16:
; RV64:       # %bb.0:
; RV64-NEXT:    lui a0, %hi(.LCPI43_0)
; RV64-NEXT:    fld fa5, %lo(.LCPI43_0)(a0)
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfabs.v v12, v8
; RV64-NEXT:    vmflt.vf v0, v12, fa5
; RV64-NEXT:    vfcvt.x.f.v v12, v8, v0.t
; RV64-NEXT:    vfcvt.f.x.v v12, v12, v0.t
; RV64-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
; RV64-NEXT:    vfsgnj.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsetvli zero, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.rtz.xu.f.w v12, v8
; RV64-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
; RV64-NEXT:    vnsrl.wi v8, v12, 0
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i16>
  ret <vscale x 4 x i16> %b
}

define <vscale x 4 x i32> @rint_nxv4f64_to_si32(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.x.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.x.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i32> @rint_nxv4f64_to_ui32(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui32:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV32-NEXT:    vfncvt.xu.f.w v12, v8
; RV32-NEXT:    vmv.v.v v8, v12
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui32:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e32, m2, ta, ma
; RV64-NEXT:    vfncvt.xu.f.w v12, v8
; RV64-NEXT:    vmv.v.v v8, v12
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i32>
  ret <vscale x 4 x i32> %b
}

define <vscale x 4 x i64> @rint_nxv4f64_to_si64(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_si64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.x.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_si64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.x.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptosi <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}

define <vscale x 4 x i64> @rint_nxv4f64_to_ui64(<vscale x 4 x double> %x) {
; RV32-LABEL: rint_nxv4f64_to_ui64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV32-NEXT:    vfcvt.xu.f.v v8, v8
; RV32-NEXT:    ret
;
; RV64-LABEL: rint_nxv4f64_to_ui64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli a0, zero, e64, m4, ta, ma
; RV64-NEXT:    vfcvt.xu.f.v v8, v8
; RV64-NEXT:    ret
  %a = call <vscale x 4 x double> @llvm.rint.nxv4f64(<vscale x 4 x double> %x)
  %b = fptoui <vscale x 4 x double> %a to <vscale x 4 x i64>
  ret <vscale x 4 x i64> %b
}