; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32d | FileCheck -check-prefixes=CHECKIFD,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64d | FileCheck -check-prefixes=CHECKIFD,RV64IFD %s
; RUN: llc -mtriple=riscv32 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -target-abi=ilp32 | FileCheck -check-prefixes=RV32IZFINXZDINX %s
; RUN: llc -mtriple=riscv64 -mattr=+zdinx -verify-machineinstrs < %s \
; RUN:   -target-abi=lp64 | FileCheck -check-prefixes=RV64IZFINXZDINX %s

define signext i32 @test_floor_si32(double %x) {
; CHECKIFD-LABEL: test_floor_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rdn
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_floor_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rdn
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rdn
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}
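; Note (editorial, inferred from the checks above): in the i32 cases the
; saturating conversion lowers to a single fcvt with a static rounding mode
; (rdn for floor), and the feq.d/seqz/addi/and tail builds an all-ones mask
; for non-NaN inputs so a NaN input yields 0, matching the documented
; llvm.fptosi.sat/llvm.fptoui.sat semantics. The RV32 Zdinx variants first
; round-trip the GPR pair through the stack (sw/sw then lw/lw), which is how
; an f64 held in integer registers is materialized here.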
define i64 @test_floor_si64(double %x) nounwind {
; RV32IFD-LABEL: test_floor_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call floor
; RV32IFD-NEXT:    lui a0, %hi(.LCPI1_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a4, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB1_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB1_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI1_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI1_1)(a1)
; RV32IFD-NEXT:    flt.d a3, fa5, fs0
; RV32IFD-NEXT:    beqz a3, .LBB1_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a4, -1
; RV32IFD-NEXT:  .LBB1_4:
; RV32IFD-NEXT:    feq.d a1, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    and a1, a4, a2
; RV32IFD-NEXT:    neg a2, a3
; RV32IFD-NEXT:    neg a3, s0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    or a0, a2, a0
; RV32IFD-NEXT:    and a0, a4, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rdn
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_floor_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call floor
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp)
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI1_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI1_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI1_0)(a2)
; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    lui a2, 524288
; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB1_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a2, a1
; RV32IZFINXZDINX-NEXT:  .LBB1_2:
; RV32IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI1_1)
; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI1_1)(a1)
; RV32IZFINXZDINX-NEXT:    lw a7, %lo(.LCPI1_1+4)(a1)
; RV32IZFINXZDINX-NEXT:    flt.d a3, a6, s2
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB1_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a2, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB1_4:
; RV32IZFINXZDINX-NEXT:    feq.d a1, s2, s2
; RV32IZFINXZDINX-NEXT:    neg a4, a1
; RV32IZFINXZDINX-NEXT:    and a1, a4, a2
; RV32IZFINXZDINX-NEXT:    neg a2, s0
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    neg a2, a3
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a0, a4, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_floor_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rdn
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}

define signext i32 @test_floor_ui32(double %x) {
; CHECKIFD-LABEL: test_floor_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rdn
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_floor_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rdn
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rdn
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
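; Note (editorial, inferred from the checks above): RV32 has no single fcvt
; for i64 results, so test_floor_si64 rounds via a libm call (floor), converts
; with the compiler-rt libcall __fixdfdi, and applies the saturation manually:
; fle.d/flt.d compare against bound constants kept in the constant pool
; (.LCPI1_0/.LCPI1_1), the branches select INT64_MIN/INT64_MAX halves built
; from lui 524288, and feq.d again masks a NaN input to 0.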
define i64 @test_floor_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_floor_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call floor
; RV32IFD-NEXT:    lui a0, %hi(.LCPI3_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI3_0)(a0)
; RV32IFD-NEXT:    flt.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s0, a0
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fle.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_floor_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rdn
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_floor_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call floor
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg s2, a2
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI3_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI3_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI3_0)(a2)
; RV32IZFINXZDINX-NEXT:    and a0, s2, a0
; RV32IZFINXZDINX-NEXT:    flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, s2, a1
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_floor_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rdn
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.floor.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}
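; Note (editorial, inferred from the checks above): the unsigned i64 lowering
; needs no constant-pool lower bound; fle.d against fcvt.d.w zero clamps
; negative inputs (and NaN) to 0, while flt.d against the upper bound ORs in
; all-ones on overflow. The remaining tests repeat the same three patterns
; for ceil (rup), trunc (rtz), round (rmm), roundeven (rne) and rint.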
define signext i32 @test_ceil_si32(double %x) {
; CHECKIFD-LABEL: test_ceil_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rup
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_ceil_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rup
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rup
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}

define i64 @test_ceil_si64(double %x) nounwind {
; RV32IFD-LABEL: test_ceil_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call ceil
; RV32IFD-NEXT:    lui a0, %hi(.LCPI5_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a4, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB5_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB5_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI5_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI5_1)(a1)
; RV32IFD-NEXT:    flt.d a3, fa5, fs0
; RV32IFD-NEXT:    beqz a3, .LBB5_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a4, -1
; RV32IFD-NEXT:  .LBB5_4:
; RV32IFD-NEXT:    feq.d a1, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    and a1, a4, a2
; RV32IFD-NEXT:    neg a2, a3
; RV32IFD-NEXT:    neg a3, s0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    or a0, a2, a0
; RV32IFD-NEXT:    and a0, a4, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rup
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_ceil_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call ceil
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp)
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI5_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI5_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI5_0)(a2)
; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    lui a2, 524288
; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB5_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a2, a1
; RV32IZFINXZDINX-NEXT:  .LBB5_2:
; RV32IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI5_1)
; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI5_1)(a1)
; RV32IZFINXZDINX-NEXT:    lw a7, %lo(.LCPI5_1+4)(a1)
; RV32IZFINXZDINX-NEXT:    flt.d a3, a6, s2
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB5_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a2, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB5_4:
; RV32IZFINXZDINX-NEXT:    feq.d a1, s2, s2
; RV32IZFINXZDINX-NEXT:    neg a4, a1
; RV32IZFINXZDINX-NEXT:    and a1, a4, a2
; RV32IZFINXZDINX-NEXT:    neg a2, s0
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    neg a2, a3
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a0, a4, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rup
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
define signext i32 @test_ceil_ui32(double %x) {
; CHECKIFD-LABEL: test_ceil_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rup
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rup
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rup
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
define i64 @test_ceil_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_ceil_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call ceil
; RV32IFD-NEXT:    lui a0, %hi(.LCPI7_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI7_0)(a0)
; RV32IFD-NEXT:    flt.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s0, a0
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fle.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_ceil_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rup
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_ceil_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call ceil
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg s2, a2
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI7_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI7_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI7_0)(a2)
; RV32IZFINXZDINX-NEXT:    and a0, s2, a0
; RV32IZFINXZDINX-NEXT:    flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, s2, a1
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_ceil_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rup
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.ceil.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}

define signext i32 @test_trunc_si32(double %x) {
; CHECKIFD-LABEL: test_trunc_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rtz
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_trunc_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rtz
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}
define i64 @test_trunc_si64(double %x) nounwind {
; RV32IFD-LABEL: test_trunc_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call trunc
; RV32IFD-NEXT:    lui a0, %hi(.LCPI9_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a4, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB9_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB9_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI9_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI9_1)(a1)
; RV32IFD-NEXT:    flt.d a3, fa5, fs0
; RV32IFD-NEXT:    beqz a3, .LBB9_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a4, -1
; RV32IFD-NEXT:  .LBB9_4:
; RV32IFD-NEXT:    feq.d a1, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    and a1, a4, a2
; RV32IFD-NEXT:    neg a2, a3
; RV32IFD-NEXT:    neg a3, s0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    or a0, a2, a0
; RV32IFD-NEXT:    and a0, a4, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rtz
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_trunc_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call trunc
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp)
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI9_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI9_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI9_0)(a2)
; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    lui a2, 524288
; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB9_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a2, a1
; RV32IZFINXZDINX-NEXT:  .LBB9_2:
; RV32IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI9_1)
; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI9_1)(a1)
; RV32IZFINXZDINX-NEXT:    lw a7, %lo(.LCPI9_1+4)(a1)
; RV32IZFINXZDINX-NEXT:    flt.d a3, a6, s2
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB9_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a2, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB9_4:
; RV32IZFINXZDINX-NEXT:    feq.d a1, s2, s2
; RV32IZFINXZDINX-NEXT:    neg a4, a1
; RV32IZFINXZDINX-NEXT:    and a1, a4, a2
; RV32IZFINXZDINX-NEXT:    neg a2, s0
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    neg a2, a3
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a0, a4, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rtz
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
define signext i32 @test_trunc_ui32(double %x) {
; CHECKIFD-LABEL: test_trunc_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rtz
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rtz
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rtz
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}

define i64 @test_trunc_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_trunc_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call trunc
; RV32IFD-NEXT:    lui a0, %hi(.LCPI11_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI11_0)(a0)
; RV32IFD-NEXT:    flt.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s0, a0
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fle.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_trunc_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rtz
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_trunc_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call trunc
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg s2, a2
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI11_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI11_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI11_0)(a2)
; RV32IZFINXZDINX-NEXT:    and a0, s2, a0
; RV32IZFINXZDINX-NEXT:    flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, s2, a1
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_trunc_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rtz
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.trunc.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}
define signext i32 @test_round_si32(double %x) {
; CHECKIFD-LABEL: test_round_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rmm
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}
define i64 @test_round_si64(double %x) nounwind {
; RV32IFD-LABEL: test_round_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lui a0, %hi(.LCPI13_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a4, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB13_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB13_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI13_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI13_1)(a1)
; RV32IFD-NEXT:    flt.d a3, fa5, fs0
; RV32IFD-NEXT:    beqz a3, .LBB13_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a4, -1
; RV32IFD-NEXT:  .LBB13_4:
; RV32IFD-NEXT:    feq.d a1, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    and a1, a4, a2
; RV32IFD-NEXT:    neg a2, a3
; RV32IFD-NEXT:    neg a3, s0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    or a0, a2, a0
; RV32IFD-NEXT:    and a0, a4, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rmm
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call round
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp)
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI13_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI13_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI13_0)(a2)
; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    lui a2, 524288
; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB13_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a2, a1
; RV32IZFINXZDINX-NEXT:  .LBB13_2:
; RV32IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI13_1)
; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI13_1)(a1)
; RV32IZFINXZDINX-NEXT:    lw a7, %lo(.LCPI13_1+4)(a1)
; RV32IZFINXZDINX-NEXT:    flt.d a3, a6, s2
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB13_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a2, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB13_4:
; RV32IZFINXZDINX-NEXT:    feq.d a1, s2, s2
; RV32IZFINXZDINX-NEXT:    neg a4, a1
; RV32IZFINXZDINX-NEXT:    and a1, a4, a2
; RV32IZFINXZDINX-NEXT:    neg a2, s0
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    neg a2, a3
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a0, a4, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}

define signext i32 @test_round_ui32(double %x) {
; CHECKIFD-LABEL: test_round_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rmm
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rmm
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
define i64 @test_round_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_round_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lui a0, %hi(.LCPI15_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI15_0)(a0)
; RV32IFD-NEXT:    flt.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s0, a0
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fle.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_round_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rmm
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_round_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call round
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg s2, a2
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI15_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI15_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI15_0)(a2)
; RV32IZFINXZDINX-NEXT:    and a0, s2, a0
; RV32IZFINXZDINX-NEXT:    flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, s2, a1
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_round_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rmm
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.round.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}
define signext i32 @test_roundeven_si32(double %x) {
; CHECKIFD-LABEL: test_roundeven_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0, rne
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0, rne
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}

define i64 @test_roundeven_si64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    lui a0, %hi(.LCPI17_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a4, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB17_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB17_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI17_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI17_1)(a1)
; RV32IFD-NEXT:    flt.d a3, fa5, fs0
; RV32IFD-NEXT:    beqz a3, .LBB17_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a4, -1
; RV32IFD-NEXT:  .LBB17_4:
; RV32IFD-NEXT:    feq.d a1, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    and a1, a4, a2
; RV32IFD-NEXT:    neg a2, a3
; RV32IFD-NEXT:    neg a3, s0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    or a0, a2, a0
; RV32IFD-NEXT:    and a0, a4, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0, rne
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp)
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI17_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI17_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI17_0)(a2)
; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    lui a2, 524288
; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB17_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a2, a1
; RV32IZFINXZDINX-NEXT:  .LBB17_2:
; RV32IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI17_1)
; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI17_1)(a1)
; RV32IZFINXZDINX-NEXT:    lw a7, %lo(.LCPI17_1+4)(a1)
; RV32IZFINXZDINX-NEXT:    flt.d a3, a6, s2
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB17_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a2, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB17_4:
; RV32IZFINXZDINX-NEXT:    feq.d a1, s2, s2
; RV32IZFINXZDINX-NEXT:    neg a4, a1
; RV32IZFINXZDINX-NEXT:    and a1, a4, a2
; RV32IZFINXZDINX-NEXT:    neg a2, s0
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    neg a2, a3
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a0, a4, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
define signext i32 @test_roundeven_ui32(double %x) {
; CHECKIFD-LABEL: test_roundeven_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0, rne
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0, rne
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}
define i64 @test_roundeven_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_roundeven_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call roundeven
; RV32IFD-NEXT:    lui a0, %hi(.LCPI19_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI19_0)(a0)
; RV32IFD-NEXT:    flt.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s0, a0
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fle.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_roundeven_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0, rne
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call roundeven
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg s2, a2
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI19_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI19_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI19_0)(a2)
; RV32IZFINXZDINX-NEXT:    and a0, s2, a0
; RV32IZFINXZDINX-NEXT:    flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, s2, a1
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_roundeven_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0, rne
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.roundeven.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}

define signext i32 @test_rint_si32(double %x) {
; CHECKIFD-LABEL: test_rint_si32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.w.d a0, fa0
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_si32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.w.d a2, a0
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.w.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i32 @llvm.fptosi.sat.i32.f64(double %a)
  ret i32 %b
}
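; Note (editorial, inferred from the checks above): llvm.rint rounds according
; to the current environment, so the fcvt instructions in the rint tests carry
; no static rounding-mode operand and instead use the dynamic rounding mode
; from the frm register; everything else follows the earlier patterns.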
define i64 @test_rint_si64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_si64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lui a0, %hi(.LCPI21_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI21_0)(a0)
; RV32IFD-NEXT:    fmv.d fs0, fa0
; RV32IFD-NEXT:    fle.d s0, fa5, fa0
; RV32IFD-NEXT:    call __fixdfdi
; RV32IFD-NEXT:    lui a4, 524288
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    beqz s0, .LBB21_2
; RV32IFD-NEXT:  # %bb.1:
; RV32IFD-NEXT:    mv a2, a1
; RV32IFD-NEXT:  .LBB21_2:
; RV32IFD-NEXT:    lui a1, %hi(.LCPI21_1)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI21_1)(a1)
; RV32IFD-NEXT:    flt.d a3, fa5, fs0
; RV32IFD-NEXT:    beqz a3, .LBB21_4
; RV32IFD-NEXT:  # %bb.3:
; RV32IFD-NEXT:    addi a2, a4, -1
; RV32IFD-NEXT:  .LBB21_4:
; RV32IFD-NEXT:    feq.d a1, fs0, fs0
; RV32IFD-NEXT:    neg a4, a1
; RV32IFD-NEXT:    and a1, a4, a2
; RV32IFD-NEXT:    neg a2, a3
; RV32IFD-NEXT:    neg a3, s0
; RV32IFD-NEXT:    and a0, a3, a0
; RV32IFD-NEXT:    or a0, a2, a0
; RV32IFD-NEXT:    and a0, a4, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_rint_si64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.l.d a0, fa0
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_si64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s3, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call rint
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s2, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s3, 12(sp)
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI21_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI21_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI21_0)(a2)
; RV32IZFINXZDINX-NEXT:    fle.d s0, a2, s2
; RV32IZFINXZDINX-NEXT:    call __fixdfdi
; RV32IZFINXZDINX-NEXT:    lui a4, 524288
; RV32IZFINXZDINX-NEXT:    lui a2, 524288
; RV32IZFINXZDINX-NEXT:    beqz s0, .LBB21_2
; RV32IZFINXZDINX-NEXT:  # %bb.1:
; RV32IZFINXZDINX-NEXT:    mv a2, a1
; RV32IZFINXZDINX-NEXT:  .LBB21_2:
; RV32IZFINXZDINX-NEXT:    lui a1, %hi(.LCPI21_1)
; RV32IZFINXZDINX-NEXT:    lw a6, %lo(.LCPI21_1)(a1)
; RV32IZFINXZDINX-NEXT:    lw a7, %lo(.LCPI21_1+4)(a1)
; RV32IZFINXZDINX-NEXT:    flt.d a3, a6, s2
; RV32IZFINXZDINX-NEXT:    beqz a3, .LBB21_4
; RV32IZFINXZDINX-NEXT:  # %bb.3:
; RV32IZFINXZDINX-NEXT:    addi a2, a4, -1
; RV32IZFINXZDINX-NEXT:  .LBB21_4:
; RV32IZFINXZDINX-NEXT:    feq.d a1, s2, s2
; RV32IZFINXZDINX-NEXT:    neg a4, a1
; RV32IZFINXZDINX-NEXT:    and a1, a4, a2
; RV32IZFINXZDINX-NEXT:    neg a2, s0
; RV32IZFINXZDINX-NEXT:    and a0, a2, a0
; RV32IZFINXZDINX-NEXT:    neg a2, a3
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a0, a4, a0
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s3, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_si64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.l.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i64 @llvm.fptosi.sat.i64.f64(double %a)
  ret i64 %b
}
define signext i32 @test_rint_ui32(double %x) {
; CHECKIFD-LABEL: test_rint_ui32:
; CHECKIFD:       # %bb.0:
; CHECKIFD-NEXT:    fcvt.wu.d a0, fa0
; CHECKIFD-NEXT:    feq.d a1, fa0, fa0
; CHECKIFD-NEXT:    seqz a1, a1
; CHECKIFD-NEXT:    addi a1, a1, -1
; CHECKIFD-NEXT:    and a0, a1, a0
; CHECKIFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_ui32:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -16
; RV32IZFINXZDINX-NEXT:    .cfi_def_cfa_offset 16
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.wu.d a2, a0
; RV32IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV32IZFINXZDINX-NEXT:    seqz a0, a0
; RV32IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV32IZFINXZDINX-NEXT:    and a0, a0, a2
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 16
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui32:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.wu.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i32 @llvm.fptoui.sat.i32.f64(double %a)
  ret i32 %b
}

define i64 @test_rint_ui64(double %x) nounwind {
; RV32IFD-LABEL: test_rint_ui64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 4(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lui a0, %hi(.LCPI23_0)
; RV32IFD-NEXT:    fld fa5, %lo(.LCPI23_0)(a0)
; RV32IFD-NEXT:    flt.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s0, a0
; RV32IFD-NEXT:    fcvt.d.w fa5, zero
; RV32IFD-NEXT:    fle.d a0, fa5, fa0
; RV32IFD-NEXT:    neg s1, a0
; RV32IFD-NEXT:    call __fixunsdfdi
; RV32IFD-NEXT:    and a0, s1, a0
; RV32IFD-NEXT:    or a0, s0, a0
; RV32IFD-NEXT:    and a1, s1, a1
; RV32IFD-NEXT:    or a1, s0, a1
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 4(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: test_rint_ui64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fcvt.lu.d a0, fa0
; RV64IFD-NEXT:    feq.d a1, fa0, fa0
; RV64IFD-NEXT:    seqz a1, a1
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a1, a0
; RV64IFD-NEXT:    ret
;
; RV32IZFINXZDINX-LABEL: test_rint_ui64:
; RV32IZFINXZDINX:       # %bb.0:
; RV32IZFINXZDINX-NEXT:    addi sp, sp, -32
; RV32IZFINXZDINX-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s1, 20(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    sw s2, 16(sp) # 4-byte Folded Spill
; RV32IZFINXZDINX-NEXT:    call rint
; RV32IZFINXZDINX-NEXT:    sw a0, 8(sp)
; RV32IZFINXZDINX-NEXT:    sw a1, 12(sp)
; RV32IZFINXZDINX-NEXT:    lw s0, 8(sp)
; RV32IZFINXZDINX-NEXT:    lw s1, 12(sp)
; RV32IZFINXZDINX-NEXT:    fcvt.d.w a2, zero
; RV32IZFINXZDINX-NEXT:    fle.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg s2, a2
; RV32IZFINXZDINX-NEXT:    call __fixunsdfdi
; RV32IZFINXZDINX-NEXT:    lui a2, %hi(.LCPI23_0)
; RV32IZFINXZDINX-NEXT:    lw a3, %lo(.LCPI23_0+4)(a2)
; RV32IZFINXZDINX-NEXT:    lw a2, %lo(.LCPI23_0)(a2)
; RV32IZFINXZDINX-NEXT:    and a0, s2, a0
; RV32IZFINXZDINX-NEXT:    flt.d a2, a2, s0
; RV32IZFINXZDINX-NEXT:    neg a2, a2
; RV32IZFINXZDINX-NEXT:    or a0, a2, a0
; RV32IZFINXZDINX-NEXT:    and a1, s2, a1
; RV32IZFINXZDINX-NEXT:    or a1, a2, a1
; RV32IZFINXZDINX-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s1, 20(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    lw s2, 16(sp) # 4-byte Folded Reload
; RV32IZFINXZDINX-NEXT:    addi sp, sp, 32
; RV32IZFINXZDINX-NEXT:    ret
;
; RV64IZFINXZDINX-LABEL: test_rint_ui64:
; RV64IZFINXZDINX:       # %bb.0:
; RV64IZFINXZDINX-NEXT:    fcvt.lu.d a1, a0
; RV64IZFINXZDINX-NEXT:    feq.d a0, a0, a0
; RV64IZFINXZDINX-NEXT:    seqz a0, a0
; RV64IZFINXZDINX-NEXT:    addi a0, a0, -1
; RV64IZFINXZDINX-NEXT:    and a0, a0, a1
; RV64IZFINXZDINX-NEXT:    ret
  %a = call double @llvm.rint.f64(double %x)
  %b = call i64 @llvm.fptoui.sat.i64.f64(double %a)
  ret i64 %b
}

declare double @llvm.floor.f64(double)
declare double @llvm.ceil.f64(double)
declare double @llvm.trunc.f64(double)
declare double @llvm.round.f64(double)
declare double @llvm.roundeven.f64(double)
declare double @llvm.rint.f64(double)
declare i32 @llvm.fptosi.sat.i32.f64(double)
declare i64 @llvm.fptosi.sat.i64.f64(double)
declare i32 @llvm.fptoui.sat.i32.f64(double)
declare i64 @llvm.fptoui.sat.i64.f64(double)