; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine %s | FileCheck %s

@var = external global i32, align 4

; fcmp olt fabs(x), smallest_normalized_number -> fcmp oeq x, 0.0
; https://alive2.llvm.org/ce/z/fib8cf
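; The constants below are the smallest normalized values of each type:
; 0x3810000000000000 (2^-126) for float, 0x10000000000000 (2^-1022) for
; double, and 0xH0400 (2^-14) for half.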
define void @denormal_input_preserve_sign_fcmp_olt_smallest_normalized(float %f32, double %f64, half %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_olt_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq float [[F32:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp oeq double [[F64:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp oeq half [[F16:%.*]], 0xH0000
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF32_FLAGS:%.*]] = fcmp oeq float [[F32]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32_FLAGS]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp olt float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp olt double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp olt half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  %f32.fabs.flags = call nsz nnan float @llvm.fabs.f32(float %f32)
  %cmpf32.flags = fcmp olt float %f32.fabs.flags, 0x3810000000000000
  store volatile i1 %cmpf32.flags, ptr @var
  ret void
}

; fcmp uge fabs(x), smallest_normalized_number -> fcmp une x, 0.0
; https://alive2.llvm.org/ce/z/xmqBXx
define void @denormal_input_preserve_sign_fcmp_uge_smallest_normalized(float %f32, double %f64, half %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_uge_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp une float [[F32:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp une double [[F64:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp une half [[F16:%.*]], 0xH0000
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp uge float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp uge double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp uge half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}

; fcmp oge fabs(x), smallest_normalized_number -> fcmp one x, 0.0
; https://alive2.llvm.org/ce/z/ZucNzF
define void @denormal_input_preserve_sign_fcmp_oge_smallest_normalized(float %f32, double %f64, half %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_oge_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp one float [[F32:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp one double [[F64:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp one half [[F16:%.*]], 0xH0000
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp oge float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp oge double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp oge half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}

; fcmp ult fabs(x), smallest_normalized_number -> fcmp ueq x, 0.0
; https://alive2.llvm.org/ce/z/csAhZ2
define void @denormal_input_preserve_sign_fcmp_ult_smallest_normalized(float %f32, double %f64, half %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_fcmp_ult_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp ueq float [[F32:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp ueq double [[F64:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp ueq half [[F16:%.*]], 0xH0000
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp ult float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp ult double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp ult half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}
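; Same folds with <2 x ...> vector operands; the splat of the smallest
; normalized value folds to a compare against zeroinitializer.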
define void @denormal_input_preserve_sign_vector_fcmp_olt_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_olt_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq <2 x float> [[F32:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp oeq <2 x double> [[F64:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp oeq <2 x half> [[F16:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
  %cmpf32 = fcmp olt <2 x float> %f32.fabs, <float 0x3810000000000000, float 0x3810000000000000>
  store volatile <2 x i1> %cmpf32, ptr @var
  %f64.fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %f64)
  %cmpf64 = fcmp olt <2 x double> %f64.fabs, <double 0x10000000000000, double 0x10000000000000>
  store volatile <2 x i1> %cmpf64, ptr @var
  %f16.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %f16)
  %cmpf16 = fcmp olt <2 x half> %f16.fabs, <half 0xH0400, half 0xH0400>
  store volatile <2 x i1> %cmpf16, ptr @var
  ret void
}

define void @denormal_input_preserve_sign_vector_fcmp_uge_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_uge_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp une <2 x float> [[F32:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp une <2 x double> [[F64:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp une <2 x half> [[F16:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
  %cmpf32 = fcmp uge <2 x float> %f32.fabs, <float 0x3810000000000000, float 0x3810000000000000>
  store volatile <2 x i1> %cmpf32, ptr @var
  %f64.fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %f64)
  %cmpf64 = fcmp uge <2 x double> %f64.fabs, <double 0x10000000000000, double 0x10000000000000>
  store volatile <2 x i1> %cmpf64, ptr @var
  %f16.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %f16)
  %cmpf16 = fcmp uge <2 x half> %f16.fabs, <half 0xH0400, half 0xH0400>
  store volatile <2 x i1> %cmpf16, ptr @var
  ret void
}

define void @denormal_input_preserve_sign_vector_fcmp_oge_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_oge_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp one <2 x float> [[F32:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp one <2 x double> [[F64:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp one <2 x half> [[F16:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
  %cmpf32 = fcmp oge <2 x float> %f32.fabs, <float 0x3810000000000000, float 0x3810000000000000>
  store volatile <2 x i1> %cmpf32, ptr @var
  %f64.fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %f64)
  %cmpf64 = fcmp oge <2 x double> %f64.fabs, <double 0x10000000000000, double 0x10000000000000>
  store volatile <2 x i1> %cmpf64, ptr @var
  %f16.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %f16)
  %cmpf16 = fcmp oge <2 x half> %f16.fabs, <half 0xH0400, half 0xH0400>
  store volatile <2 x i1> %cmpf16, ptr @var
  ret void
}

define void @denormal_input_preserve_sign_vector_fcmp_ult_smallest_normalized(<2 x float> %f32, <2 x double> %f64, <2 x half> %f16) #0 {
; CHECK-LABEL: @denormal_input_preserve_sign_vector_fcmp_ult_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp ueq <2 x float> [[F32:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp ueq <2 x double> [[F64:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp ueq <2 x half> [[F16:%.*]], zeroinitializer
; CHECK-NEXT:    store volatile <2 x i1> [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call <2 x float> @llvm.fabs.v2f32(<2 x float> %f32)
  %cmpf32 = fcmp ult <2 x float> %f32.fabs, <float 0x3810000000000000, float 0x3810000000000000>
  store volatile <2 x i1> %cmpf32, ptr @var
  %f64.fabs = call <2 x double> @llvm.fabs.v2f64(<2 x double> %f64)
  %cmpf64 = fcmp ult <2 x double> %f64.fabs, <double 0x10000000000000, double 0x10000000000000>
  store volatile <2 x i1> %cmpf64, ptr @var
  %f16.fabs = call <2 x half> @llvm.fabs.v2f16(<2 x half> %f16)
  %cmpf16 = fcmp ult <2 x half> %f16.fabs, <half 0xH0400, half 0xH0400>
  store volatile <2 x i1> %cmpf16, ptr @var
  ret void
}

; fcmp olt fabs(x), smallest_normalized_number -> fcmp oeq x, 0.0
; https://alive2.llvm.org/ce/z/mpduXS
define void @denormal_input_positive_zero_fcmp_olt_smallest_normalized(float %f32, double %f64, half %f16) #1 {
; CHECK-LABEL: @denormal_input_positive_zero_fcmp_olt_smallest_normalized(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq float [[F32:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp oeq double [[F64:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp oeq half [[F16:%.*]], 0xH0000
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp olt float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp olt double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp olt half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}
; Should not fold with IEEE inputs.
define void @denormal_input_ieee(float %f32, double %f64, half %f16) #2 {
; CHECK-LABEL: @denormal_input_ieee(
; CHECK-NEXT:    [[F32_FABS:%.*]] = call float @llvm.fabs.f32(float [[F32:%.*]])
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp olt float [[F32_FABS]], 0x3810000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[F64_FABS:%.*]] = call double @llvm.fabs.f64(double [[F64:%.*]])
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp olt double [[F64_FABS]], 0x10000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[F16_FABS:%.*]] = call half @llvm.fabs.f16(half [[F16:%.*]])
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp olt half [[F16_FABS]], 0xH0400
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp olt float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp olt double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp olt half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}

; Only f32 case should fold.
define void @denormal_input_preserve_sign_f32_only(float %f32, double %f64, half %f16) #3 {
; CHECK-LABEL: @denormal_input_preserve_sign_f32_only(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp oeq float [[F32:%.*]], 0.000000e+00
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[F64_FABS:%.*]] = call double @llvm.fabs.f64(double [[F64:%.*]])
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp olt double [[F64_FABS]], 0x10000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[F16_FABS:%.*]] = call half @llvm.fabs.f16(half [[F16:%.*]])
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp olt half [[F16_FABS]], 0xH0400
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp olt float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp olt double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp olt half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}

define void @wrong_fcmp_type_ole(float %f32, double %f64, half %f16) #0 {
; CHECK-LABEL: @wrong_fcmp_type_ole(
; CHECK-NEXT:    [[F32_FABS:%.*]] = call float @llvm.fabs.f32(float [[F32:%.*]])
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp ole float [[F32_FABS]], 0x3810000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[F64_FABS:%.*]] = call double @llvm.fabs.f64(double [[F64:%.*]])
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp ole double [[F64_FABS]], 0x10000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[F16_FABS:%.*]] = call half @llvm.fabs.f16(half [[F16:%.*]])
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp ole half [[F16_FABS]], 0xH0400
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %f32.fabs = call float @llvm.fabs.f32(float %f32)
  %cmpf32 = fcmp ole float %f32.fabs, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %f64.fabs = call double @llvm.fabs.f64(double %f64)
  %cmpf64 = fcmp ole double %f64.fabs, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %f16.fabs = call half @llvm.fabs.f16(half %f16)
  %cmpf16 = fcmp ole half %f16.fabs, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}
define void @missing_fabs(float %f32, double %f64, half %f16) #0 {
; CHECK-LABEL: @missing_fabs(
; CHECK-NEXT:    [[CMPF32:%.*]] = fcmp olt float [[F32:%.*]], 0x3810000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF32]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF64:%.*]] = fcmp olt double [[F64:%.*]], 0x10000000000000
; CHECK-NEXT:    store volatile i1 [[CMPF64]], ptr @var, align 1
; CHECK-NEXT:    [[CMPF16:%.*]] = fcmp olt half [[F16:%.*]], 0xH0400
; CHECK-NEXT:    store volatile i1 [[CMPF16]], ptr @var, align 1
; CHECK-NEXT:    ret void
;
  %cmpf32 = fcmp olt float %f32, 0x3810000000000000
  store volatile i1 %cmpf32, ptr @var
  %cmpf64 = fcmp olt double %f64, 0x10000000000000
  store volatile i1 %cmpf64, ptr @var
  %cmpf16 = fcmp olt half %f16, 0xH0400
  store volatile i1 %cmpf16, ptr @var
  ret void
}

declare float @llvm.fabs.f32(float)
declare <2 x float> @llvm.fabs.v2f32(<2 x float>)
declare half @llvm.fabs.f16(half)
declare <2 x half> @llvm.fabs.v2f16(<2 x half>)
declare double @llvm.fabs.f64(double)
declare <2 x double> @llvm.fabs.v2f64(<2 x double>)

attributes #0 = { "denormal-fp-math"="ieee,preserve-sign" }
attributes #1 = { "denormal-fp-math"="ieee,positive-zero" }
attributes #2 = { "denormal-fp-math"="ieee,ieee" }
attributes #3 = { "denormal-fp-math-f32"="ieee,preserve-sign" }
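; Note on the attributes: "denormal-fp-math"="<output>,<input>". The folds
; above depend only on the input mode: with "preserve-sign" or "positive-zero"
; inputs, denormal operands may be flushed to zero, so comparing fabs(x)
; against the smallest normalized value is equivalent to comparing x against
; 0.0. With "ieee" inputs no such assumption holds and the fcmp is left alone.
; #3 enables the fold for f32 only via "denormal-fp-math-f32".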