; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=RV32
; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s \
; RUN:   | FileCheck %s -check-prefixes=RV64

; Copy tests from llvm/test/Transforms/CodeGenPrepare/X86/overflow-intrinsics.ll
; to test shouldFormOverflowOp on RISCV.

define i64 @uaddo1_overflow_used(i64 %a, i64 %b) nounwind ssp {
; RV32-LABEL: uaddo1_overflow_used:
; RV32:       # %bb.0:
; RV32-NEXT:    add a5, a3, a1
; RV32-NEXT:    add a4, a2, a0
; RV32-NEXT:    sltu a6, a4, a2
; RV32-NEXT:    add a5, a5, a6
; RV32-NEXT:    beq a5, a1, .LBB0_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    sltu a0, a5, a1
; RV32-NEXT:    beqz a0, .LBB0_3
; RV32-NEXT:    j .LBB0_4
; RV32-NEXT:  .LBB0_2:
; RV32-NEXT:    sltu a0, a4, a0
; RV32-NEXT:    bnez a0, .LBB0_4
; RV32-NEXT:  .LBB0_3:
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB0_4:
; RV32-NEXT:    neg a1, a0
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo1_overflow_used:
; RV64:       # %bb.0:
; RV64-NEXT:    add a2, a1, a0
; RV64-NEXT:    bltu a2, a0, .LBB0_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB0_2:
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo1_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; RV32-LABEL: uaddo1_math_overflow_used:
; RV32:       # %bb.0:
; RV32-NEXT:    add a5, a3, a1
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    sltu a1, a0, a2
; RV32-NEXT:    add a5, a5, a1
; RV32-NEXT:    beq a5, a3, .LBB1_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    sltu a1, a5, a3
; RV32-NEXT:  .LBB1_2:
; RV32-NEXT:    bnez a1, .LBB1_4
; RV32-NEXT:  # %bb.3:
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB1_4:
; RV32-NEXT:    neg a1, a1
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    sw a0, 0(a4)
; RV32-NEXT:    sw a5, 4(a4)
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo1_math_overflow_used:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    bltu a0, a1, .LBB1_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB1_2:
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

define i64 @uaddo2_overflow_used(i64 %a, i64 %b) nounwind ssp {
; RV32-LABEL: uaddo2_overflow_used:
; RV32:       # %bb.0:
; RV32-NEXT:    add a1, a3, a1
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    sltu a0, a0, a2
; RV32-NEXT:    add a1, a1, a0
; RV32-NEXT:    beq a1, a3, .LBB2_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    sltu a0, a1, a3
; RV32-NEXT:  .LBB2_2:
; RV32-NEXT:    bnez a0, .LBB2_4
; RV32-NEXT:  # %bb.3:
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB2_4:
; RV32-NEXT:    neg a1, a0
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo2_overflow_used:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    bltu a0, a1, .LBB2_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB2_2:
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo2_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; RV32-LABEL: uaddo2_math_overflow_used:
; RV32:       # %bb.0:
; RV32-NEXT:    add a5, a3, a1
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    sltu a1, a0, a2
; RV32-NEXT:    add a5, a5, a1
; RV32-NEXT:    beq a5, a3, .LBB3_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    sltu a1, a5, a3
; RV32-NEXT:  .LBB3_2:
; RV32-NEXT:    bnez a1, .LBB3_4
; RV32-NEXT:  # %bb.3:
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB3_4:
; RV32-NEXT:    neg a1, a1
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    sw a0, 0(a4)
; RV32-NEXT:    sw a5, 4(a4)
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo2_math_overflow_used:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    bltu a0, a1, .LBB3_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB3_2:
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

define i64 @uaddo3_overflow_used(i64 %a, i64 %b) nounwind ssp {
; RV32-LABEL: uaddo3_overflow_used:
; RV32:       # %bb.0:
; RV32-NEXT:    add a1, a3, a1
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    sltu a0, a0, a2
; RV32-NEXT:    add a1, a1, a0
; RV32-NEXT:    beq a3, a1, .LBB4_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    sltu a0, a1, a3
; RV32-NEXT:  .LBB4_2:
; RV32-NEXT:    bnez a0, .LBB4_4
; RV32-NEXT:  # %bb.3:
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB4_4:
; RV32-NEXT:    neg a1, a0
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo3_overflow_used:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    bltu a0, a1, .LBB4_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB4_2:
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

define i64 @uaddo3_math_overflow_used(i64 %a, i64 %b, ptr %res) nounwind ssp {
; RV32-LABEL: uaddo3_math_overflow_used:
; RV32:       # %bb.0:
; RV32-NEXT:    add a5, a3, a1
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    sltu a1, a0, a2
; RV32-NEXT:    add a5, a5, a1
; RV32-NEXT:    beq a5, a3, .LBB5_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    sltu a1, a5, a3
; RV32-NEXT:  .LBB5_2:
; RV32-NEXT:    bnez a1, .LBB5_4
; RV32-NEXT:  # %bb.3:
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB5_4:
; RV32-NEXT:    neg a1, a1
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    sw a0, 0(a4)
; RV32-NEXT:    sw a5, 4(a4)
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo3_math_overflow_used:
; RV64:       # %bb.0:
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    bltu a0, a1, .LBB5_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB5_2:
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  store i64 %add, ptr %res
  ret i64 %Q
}

; TODO? CGP sinks the compare before we have a chance to form the overflow intrinsic.
define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp {
; RV32-LABEL: uaddo4:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    andi a4, a4, 1
; RV32-NEXT:    beqz a4, .LBB6_6
; RV32-NEXT:  # %bb.1: # %next
; RV32-NEXT:    add a1, a3, a1
; RV32-NEXT:    add a0, a2, a0
; RV32-NEXT:    sltu a0, a0, a2
; RV32-NEXT:    add a1, a1, a0
; RV32-NEXT:    beq a3, a1, .LBB6_3
; RV32-NEXT:  # %bb.2: # %next
; RV32-NEXT:    sltu a0, a1, a3
; RV32-NEXT:  .LBB6_3: # %next
; RV32-NEXT:    bnez a0, .LBB6_5
; RV32-NEXT:  # %bb.4: # %next
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB6_5: # %next
; RV32-NEXT:    neg a1, a0
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
; RV32-NEXT:  .LBB6_6: # %exit
; RV32-NEXT:    li a0, 0
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo4:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    andi a2, a2, 1
; RV64-NEXT:    beqz a2, .LBB6_4
; RV64-NEXT:  # %bb.1: # %next
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    bltu a0, a1, .LBB6_3
; RV64-NEXT:  # %bb.2: # %next
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB6_3: # %next
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB6_4: # %exit
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
entry:
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

define i64 @uaddo5(i64 %a, i64 %b, ptr %ptr, i1 %c) nounwind ssp {
; RV32-LABEL: uaddo5:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    andi a5, a5, 1
; RV32-NEXT:    add a1, a3, a1
; RV32-NEXT:    add a6, a2, a0
; RV32-NEXT:    sltu a0, a6, a2
; RV32-NEXT:    add a1, a1, a0
; RV32-NEXT:    sw a6, 0(a4)
; RV32-NEXT:    sw a1, 4(a4)
; RV32-NEXT:    beqz a5, .LBB7_6
; RV32-NEXT:  # %bb.1: # %next
; RV32-NEXT:    beq a3, a1, .LBB7_3
; RV32-NEXT:  # %bb.2: # %next
; RV32-NEXT:    sltu a0, a1, a3
; RV32-NEXT:  .LBB7_3: # %next
; RV32-NEXT:    bnez a0, .LBB7_5
; RV32-NEXT:  # %bb.4: # %next
; RV32-NEXT:    li a2, 42
; RV32-NEXT:  .LBB7_5: # %next
; RV32-NEXT:    neg a1, a0
; RV32-NEXT:    and a1, a1, a3
; RV32-NEXT:    mv a0, a2
; RV32-NEXT:    ret
; RV32-NEXT:  .LBB7_6: # %exit
; RV32-NEXT:    li a0, 0
; RV32-NEXT:    li a1, 0
; RV32-NEXT:    ret
;
; RV64-LABEL: uaddo5:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    andi a3, a3, 1
; RV64-NEXT:    add a0, a1, a0
; RV64-NEXT:    sd a0, 0(a2)
; RV64-NEXT:    beqz a3, .LBB7_4
; RV64-NEXT:  # %bb.1: # %next
; RV64-NEXT:    bltu a0, a1, .LBB7_3
; RV64-NEXT:  # %bb.2: # %next
; RV64-NEXT:    li a1, 42
; RV64-NEXT:  .LBB7_3: # %next
; RV64-NEXT:    mv a0, a1
; RV64-NEXT:    ret
; RV64-NEXT:  .LBB7_4: # %exit
; RV64-NEXT:    li a0, 0
; RV64-NEXT:    ret
entry:
  %add = add i64 %b, %a
  store i64 %add, ptr %ptr
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

; Instcombine folds (a + b