; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --check-attributes --check-globals
; RUN: opt -S -passes=inline,instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; TODO: We can only lower to constrained intrinsics when the necessary code
; generation support for scalable vector strict operations exists.
define <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK: Function Attrs: strictfp
; CHECK-LABEL: @replace_fadd_intrinsic_double_strictfp(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2:[0-9]+]]
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
  ret <vscale x 2 x double> %2
}

; NOTE: IRBuilder::CreateBinOp doesn't emit constrained operations directly so
; rely on function inlining to showcase the problematic transformation.
define <vscale x 2 x double> @call_replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK: Function Attrs: strictfp
; CHECK-LABEL: @call_replace_fadd_intrinsic_double_strictfp(
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fadd.u.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = call <vscale x 2 x double> @replace_fadd_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
  ret <vscale x 2 x double> %1
}

; TODO: We can only lower to constrained intrinsics when the necessary code
; generation support for scalable vector strict operations exists.
define <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK: Function Attrs: strictfp
; CHECK-LABEL: @replace_fmul_intrinsic_double_strictfp(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
  ret <vscale x 2 x double> %2
}

; NOTE: IRBuilder::CreateBinOp doesn't emit constrained operations directly so
; rely on function inlining to showcase the problematic transformation.
define <vscale x 2 x double> @call_replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK: Function Attrs: strictfp
; CHECK-LABEL: @call_replace_fmul_intrinsic_double_strictfp(
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fmul.u.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = call <vscale x 2 x double> @replace_fmul_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
  ret <vscale x 2 x double> %1
}

; TODO: We can only lower to constrained intrinsics when the necessary code
; generation support for scalable vector strict operations exists.
define <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK: Function Attrs: strictfp
; CHECK-LABEL: @replace_fsub_intrinsic_double_strictfp(
; CHECK-NEXT:    [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
; CHECK-NEXT:    [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = tail call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #1
  %2 = tail call <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1> %1, <vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
  ret <vscale x 2 x double> %2
}

; NOTE: IRBuilder::CreateBinOp doesn't emit constrained operations directly so
; rely on function inlining to showcase the problematic transformation.
define <vscale x 2 x double> @call_replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #0 {
; CHECK: Function Attrs: strictfp
; CHECK-LABEL: @call_replace_fsub_intrinsic_double_strictfp(
; CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32 31) #[[ATTR2]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.fsub.u.nxv2f64(<vscale x 2 x i1> [[TMP1]], <vscale x 2 x double> [[A:%.*]], <vscale x 2 x double> [[B:%.*]]) #[[ATTR2]]
; CHECK-NEXT:    ret <vscale x 2 x double> [[TMP2]]
;
  %1 = call <vscale x 2 x double> @replace_fsub_intrinsic_double_strictfp(<vscale x 2 x double> %a, <vscale x 2 x double> %b) #1
  ret <vscale x 2 x double> %1
}

declare <vscale x 2 x double> @llvm.aarch64.sve.fadd.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fmul.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.aarch64.sve.fsub.nxv2f64(<vscale x 2 x i1>, <vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x i1> @llvm.aarch64.sve.ptrue.nxv2i1(i32)

attributes #0 = { "target-features"="+sve" strictfp }
attributes #1 = { strictfp }
;.
; CHECK: attributes #[[ATTR0:[0-9]+]] = { strictfp "target-features"="+sve" }
; CHECK: attributes #[[ATTR1:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(none) }
; CHECK: attributes #[[ATTR2]] = { strictfp }
;.
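; NOTE: For reference only (not exercised by this test): the constrained
; lowering the TODOs above refer to would use the
; llvm.experimental.constrained.* intrinsics, which carry explicit
; rounding-mode and exception-behaviour metadata. A minimal scalar sketch,
; with hypothetical value names %x/%y/%sum chosen for illustration:
;
;   %sum = call double @llvm.experimental.constrained.fadd.f64(
;            double %x, double %y,
;            metadata !"round.dynamic", metadata !"fpexcept.strict") #1
;
; The missing piece is equivalent code generation support for scalable vector
; types such as <vscale x 2 x double>.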