# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
# RUN: llc -global-isel -mtriple=amdgcn-mesa-amdpal -mcpu=gfx1010 -run-pass=amdgpu-global-isel-divergence-lowering %s -o - | FileCheck -check-prefix=GFX10 %s

# IR stub section: empty function bodies exist only so the MIR functions below
# have matching IR symbols; #0 pins a single-lane workgroup for _amdgpu_cs_main.
--- |
  define void @divergent_i1_phi_uniform_branch() {ret void}
  define void @divergent_i1_phi_uniform_branch_simple() {ret void}
  define void @divergent_i1_phi_used_inside_loop() {ret void}
  define void @divergent_i1_phi_used_inside_loop_bigger_loop_body() {ret void}
  define void @_amdgpu_cs_main() #0 {ret void}

  attributes #0 = {"amdgpu-flat-work-group-size"="1,1"}
...

# Divergent i1 G_PHI (%13 in bb.2) whose incoming values arrive over uniform
# branches (G_BRCOND on an SGPR compare, %11). Checks what the
# divergence-lowering pass emits for this phi; the CHECK block below is
# autogenerated — regenerate with update_mir_test_checks.py, do not hand-edit.
---
name: divergent_i1_phi_uniform_branch
legalized: true
tracksRegLiveness: true
body: |
  ; GFX10-LABEL: name: divergent_i1_phi_uniform_branch
  ; GFX10: bb.0:
  ; GFX10-NEXT: successors: %bb.1(0x30000000), %bb.2(0x50000000)
  ; GFX10-NEXT: liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr0
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
  ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
  ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
  ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C1]]
  ; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.2
  ; GFX10-NEXT: G_BR %bb.1
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.1:
  ; GFX10-NEXT: successors: %bb.3(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 123
  ; GFX10-NEXT: G_STORE [[C2]](s32), [[MV1]](p1) :: (store (s32), addrspace 1)
  ; GFX10-NEXT: G_BR %bb.3
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.2:
  ; GFX10-NEXT: successors: %bb.4(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s1) = G_PHI %14(s1), %bb.3, [[ICMP]](s1), %bb.0
  ; GFX10-NEXT: G_BR %bb.4
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.3:
  ; GFX10-NEXT: successors: %bb.2(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C3]]
  ; GFX10-NEXT: G_BR %bb.2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.4:
  ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[PHI]](s1), [[C5]], [[C4]]
  ; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
  ; GFX10-NEXT: S_ENDPGM 0
  bb.0:
    successors: %bb.1(0x30000000), %bb.2(0x50000000)
    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4

    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(p1) = G_MERGE_VALUES %0(s32), %1(s32)
    %3:_(s32) = COPY $vgpr2
    %4:_(s32) = COPY $sgpr0
    %5:_(s32) = COPY $vgpr3
    %6:_(s32) = COPY $vgpr4
    %7:_(p1) = G_MERGE_VALUES %5(s32), %6(s32)
    %8:_(s32) = G_CONSTANT i32 6
    %9:_(s1) = G_ICMP intpred(uge), %3(s32), %8
    %10:_(s32) = G_CONSTANT i32 0
    %11:_(s1) = G_ICMP intpred(ne), %4(s32), %10
    G_BRCOND %11(s1), %bb.2
    G_BR %bb.1

  bb.1:
    successors: %bb.3(0x80000000)

    %12:_(s32) = G_CONSTANT i32 123
    G_STORE %12(s32), %7(p1) :: (store (s32), addrspace 1)
    G_BR %bb.3

  bb.2:
    successors: %bb.4(0x80000000)

    ; divergent i1 phi under test: %9 comes from bb.0, %14 from bb.3
    %13:_(s1) = G_PHI %14(s1), %bb.3, %9(s1), %bb.0
    G_BR %bb.4

  bb.3:
    successors: %bb.2(0x80000000)

    %15:_(s32) = G_CONSTANT i32 1
    %14:_(s1) = G_ICMP intpred(ult), %3(s32), %15
    G_BR %bb.2

  bb.4:
    %16:_(s32) = G_CONSTANT i32 2
    %17:_(s32) = G_CONSTANT i32 1
    %18:_(s32) = G_SELECT %13(s1), %17, %16
    G_STORE %18(s32), %2(p1) :: (store (s32), addrspace 1)
    S_ENDPGM 0
...
# Same divergent i1 phi shape as the previous test but with a diamond collapsed
# to a triangle (bb.1 falls through to bb.2). CHECK lines are autogenerated —
# regenerate with update_mir_test_checks.py, do not hand-edit.
---
name: divergent_i1_phi_uniform_branch_simple
legalized: true
tracksRegLiveness: true
body: |
  ; GFX10-LABEL: name: divergent_i1_phi_uniform_branch_simple
  ; GFX10: bb.0:
  ; GFX10-NEXT: successors: %bb.1(0x30000000), %bb.2(0x50000000)
  ; GFX10-NEXT: liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p1) = G_MERGE_VALUES [[COPY]](s32), [[COPY1]](s32)
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $sgpr0
  ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 6
  ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(uge), [[COPY2]](s32), [[C]]
  ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[COPY3]](s32), [[C1]]
  ; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.2
  ; GFX10-NEXT: G_BR %bb.1
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.1:
  ; GFX10-NEXT: successors: %bb.2(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(ult), [[COPY2]](s32), [[C2]]
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.2:
  ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s1) = G_PHI [[ICMP]](s1), %bb.0, [[ICMP2]](s1), %bb.1
  ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[PHI]](s1), [[C4]], [[C3]]
  ; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p1) :: (store (s32), addrspace 1)
  ; GFX10-NEXT: S_ENDPGM 0
  bb.0:
    successors: %bb.1(0x30000000), %bb.2(0x50000000)
    liveins: $sgpr0, $vgpr0, $vgpr1, $vgpr2

    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(p1) = G_MERGE_VALUES %0(s32), %1(s32)
    %3:_(s32) = COPY $vgpr2
    %4:_(s32) = COPY $sgpr0
    %5:_(s32) = G_CONSTANT i32 6
    %6:_(s1) = G_ICMP intpred(uge), %3(s32), %5
    %7:_(s32) = G_CONSTANT i32 0
    %8:_(s1) = G_ICMP intpred(ne), %4(s32), %7
    G_BRCOND %8(s1), %bb.2
    G_BR %bb.1

  bb.1:
    successors: %bb.2(0x80000000)

    %9:_(s32) = G_CONSTANT i32 1
    %10:_(s1) = G_ICMP intpred(ult), %3(s32), %9

  bb.2:
    ; divergent i1 phi under test
    %11:_(s1) = G_PHI %6(s1), %bb.0, %10(s1), %bb.1
    %12:_(s32) = G_CONSTANT i32 2
    %13:_(s32) = G_CONSTANT i32 1
    %14:_(s32) = G_SELECT %11(s1), %13, %12
    G_STORE %14(s32), %2(p1) :: (store (s32), addrspace 1)
    S_ENDPGM 0
...

# Divergent i1 phi (%10) carried around a SI_LOOP-controlled loop and XOR'd
# each iteration; its final value selects the stored float after the loop.
# CHECK lines are autogenerated — regenerate with update_mir_test_checks.py.
---
name: divergent_i1_phi_used_inside_loop
legalized: true
tracksRegLiveness: true
body: |
  ; GFX10-LABEL: name: divergent_i1_phi_used_inside_loop
  ; GFX10: bb.0:
  ; GFX10-NEXT: successors: %bb.1(0x80000000)
  ; GFX10-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY1]](s32), [[COPY2]](s32)
  ; GFX10-NEXT: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.1:
  ; GFX10-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %7(s32), %bb.1, [[C1]](s32), %bb.0
  ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C1]](s32), %bb.0, %9(s32), %bb.1
  ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s1) = G_PHI [[C]](s1), %bb.0, %11(s1), %bb.1
  ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[PHI2]], [[C2]]
  ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI1]](s32)
  ; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
  ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C3]]
  ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP]](s1), [[PHI]](s32)
  ; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
  ; GFX10-NEXT: G_BR %bb.2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.2:
  ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s1) = G_PHI [[XOR]](s1), %bb.1
  ; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.1
  ; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI4]](s32)
  ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[PHI3]](s1), [[C5]], [[C4]]
  ; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p0) :: (store (s32))
  ; GFX10-NEXT: SI_RETURN
  bb.0:
    successors: %bb.1(0x80000000)
    liveins: $vgpr0, $vgpr1, $vgpr2

    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(p0) = G_MERGE_VALUES %1(s32), %2(s32)
    %4:_(s1) = G_CONSTANT i1 true
    %5:_(s32) = G_CONSTANT i32 0

  bb.1:
    successors: %bb.2(0x04000000), %bb.1(0x7c000000)

    %6:_(s32) = G_PHI %7(s32), %bb.1, %5(s32), %bb.0
    %8:_(s32) = G_PHI %5(s32), %bb.0, %9(s32), %bb.1
    ; divergent i1 phi under test: flipped every iteration by the G_XOR below
    %10:_(s1) = G_PHI %4(s1), %bb.0, %11(s1), %bb.1
    %12:_(s1) = G_CONSTANT i1 true
    %11:_(s1) = G_XOR %10, %12
    %13:_(s32) = G_UITOFP %8(s32)
    %14:_(s1) = G_FCMP floatpred(ogt), %13(s32), %0
    %15:_(s32) = G_CONSTANT i32 1
    %9:_(s32) = G_ADD %8, %15
    %7:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), %14(s1), %6(s32)
    SI_LOOP %7(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
    G_BR %bb.2

  bb.2:
    %16:_(s1) = G_PHI %11(s1), %bb.1
    %17:_(s32) = G_PHI %7(s32), %bb.1
    G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %17(s32)
    %18:_(s32) = G_FCONSTANT float 0.000000e+00
    %19:_(s32) = G_FCONSTANT float 1.000000e+00
    %20:_(s32) = G_SELECT %16(s1), %19, %18
    G_STORE %20(s32), %3(p0) :: (store (s32))
    SI_RETURN
...
# Like divergent_i1_phi_used_inside_loop but with extra control flow inside the
# loop body (a second i1 phi, %23, fed from bb.1 and bb.4). CHECK lines are
# autogenerated — regenerate with update_mir_test_checks.py, do not hand-edit.
---
name: divergent_i1_phi_used_inside_loop_bigger_loop_body
legalized: true
tracksRegLiveness: true
body: |
  ; GFX10-LABEL: name: divergent_i1_phi_used_inside_loop_bigger_loop_body
  ; GFX10: bb.0:
  ; GFX10-NEXT: successors: %bb.1(0x80000000)
  ; GFX10-NEXT: liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr2
  ; GFX10-NEXT: [[COPY3:%[0-9]+]]:_(s32) = COPY $vgpr3
  ; GFX10-NEXT: [[MV:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY2]](s32), [[COPY3]](s32)
  ; GFX10-NEXT: [[COPY4:%[0-9]+]]:_(s32) = COPY $vgpr4
  ; GFX10-NEXT: [[COPY5:%[0-9]+]]:_(s32) = COPY $vgpr5
  ; GFX10-NEXT: [[MV1:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY4]](s32), [[COPY5]](s32)
  ; GFX10-NEXT: [[COPY6:%[0-9]+]]:_(s32) = COPY $vgpr6
  ; GFX10-NEXT: [[COPY7:%[0-9]+]]:_(s32) = COPY $vgpr7
  ; GFX10-NEXT: [[MV2:%[0-9]+]]:_(p0) = G_MERGE_VALUES [[COPY6]](s32), [[COPY7]](s32)
  ; GFX10-NEXT: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; GFX10-NEXT: [[FCMP:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[COPY1]](s32), [[C1]]
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.1:
  ; GFX10-NEXT: successors: %bb.4(0x40000000), %bb.2(0x40000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %15(s32), %bb.5, [[C]](s32), %bb.0
  ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s32) = G_PHI [[C]](s32), %bb.0, %17(s32), %bb.5
  ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s1) = G_PHI [[FCMP]](s1), %bb.0, %19(s1), %bb.5
  ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 1000
  ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(sle), [[PHI1]](s32), [[C3]]
  ; GFX10-NEXT: G_BRCOND [[ICMP]](s1), %bb.4
  ; GFX10-NEXT: G_BR %bb.2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.2:
  ; GFX10-NEXT: successors: %bb.3(0x40000000), %bb.5(0x40000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s1) = G_PHI %24(s1), %bb.4, [[C2]](s1), %bb.1
  ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[PHI3]], [[C4]]
  ; GFX10-NEXT: G_BRCOND [[XOR]](s1), %bb.5
  ; GFX10-NEXT: G_BR %bb.3
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.3:
  ; GFX10-NEXT: successors: %bb.5(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s32) = G_CONSTANT i32 1000
  ; GFX10-NEXT: G_STORE [[C5]](s32), [[MV1]](p0) :: (store (s32))
  ; GFX10-NEXT: G_BR %bb.5
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.4:
  ; GFX10-NEXT: successors: %bb.2(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C6:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 1000
  ; GFX10-NEXT: G_STORE [[C7]](s32), [[MV2]](p0) :: (store (s32))
  ; GFX10-NEXT: G_BR %bb.2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.5:
  ; GFX10-NEXT: successors: %bb.6(0x04000000), %bb.1(0x7c000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C8:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; GFX10-NEXT: [[XOR1:%[0-9]+]]:_(s1) = G_XOR [[PHI2]], [[C8]]
  ; GFX10-NEXT: [[UITOFP:%[0-9]+]]:_(s32) = G_UITOFP [[PHI1]](s32)
  ; GFX10-NEXT: [[FCMP1:%[0-9]+]]:_(s1) = G_FCMP floatpred(ogt), [[UITOFP]](s32), [[COPY]]
  ; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[PHI1]], [[C9]]
  ; GFX10-NEXT: [[INTRINSIC_CONVERGENT:%[0-9]+]]:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), [[FCMP1]](s1), [[PHI]](s32)
  ; GFX10-NEXT: SI_LOOP [[INTRINSIC_CONVERGENT]](s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
  ; GFX10-NEXT: G_BR %bb.6
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.6:
  ; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s1) = G_PHI [[XOR1]](s1), %bb.5
  ; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[INTRINSIC_CONVERGENT]](s32), %bb.5
  ; GFX10-NEXT: G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), [[PHI5]](s32)
  ; GFX10-NEXT: [[C10:%[0-9]+]]:_(s32) = G_FCONSTANT float 0.000000e+00
  ; GFX10-NEXT: [[C11:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
  ; GFX10-NEXT: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[PHI4]](s1), [[C11]], [[C10]]
  ; GFX10-NEXT: G_STORE [[SELECT]](s32), [[MV]](p0) :: (store (s32))
  ; GFX10-NEXT: SI_RETURN
  bb.0:
    successors: %bb.1(0x80000000)
    liveins: $vgpr0, $vgpr1, $vgpr2, $vgpr3, $vgpr4, $vgpr5, $vgpr6, $vgpr7

    %0:_(s32) = COPY $vgpr0
    %1:_(s32) = COPY $vgpr1
    %2:_(s32) = COPY $vgpr2
    %3:_(s32) = COPY $vgpr3
    %4:_(p0) = G_MERGE_VALUES %2(s32), %3(s32)
    %5:_(s32) = COPY $vgpr4
    %6:_(s32) = COPY $vgpr5
    %7:_(p0) = G_MERGE_VALUES %5(s32), %6(s32)
    %8:_(s32) = COPY $vgpr6
    %9:_(s32) = COPY $vgpr7
    %10:_(p0) = G_MERGE_VALUES %8(s32), %9(s32)
    %11:_(s32) = G_CONSTANT i32 0
    %12:_(s32) = G_FCONSTANT float 1.000000e+00
    %13:_(s1) = G_FCMP floatpred(ogt), %1(s32), %12

  bb.1:
    successors: %bb.4(0x40000000), %bb.2(0x40000000)

    %14:_(s32) = G_PHI %15(s32), %bb.5, %11(s32), %bb.0
    %16:_(s32) = G_PHI %11(s32), %bb.0, %17(s32), %bb.5
    ; loop-carried divergent i1 phi, flipped in bb.5 each iteration
    %18:_(s1) = G_PHI %13(s1), %bb.0, %19(s1), %bb.5
    %20:_(s1) = G_CONSTANT i1 true
    %21:_(s32) = G_CONSTANT i32 1000
    %22:_(s1) = G_ICMP intpred(sle), %16(s32), %21
    G_BRCOND %22(s1), %bb.4
    G_BR %bb.2

  bb.2:
    successors: %bb.3(0x40000000), %bb.5(0x40000000)

    ; second divergent i1 phi inside the loop body
    %23:_(s1) = G_PHI %24(s1), %bb.4, %20(s1), %bb.1
    %25:_(s1) = G_CONSTANT i1 true
    %26:_(s1) = G_XOR %23, %25
    G_BRCOND %26(s1), %bb.5
    G_BR %bb.3

  bb.3:
    successors: %bb.5(0x80000000)

    %27:_(s32) = G_CONSTANT i32 1000
    G_STORE %27(s32), %7(p0) :: (store (s32))
    G_BR %bb.5

  bb.4:
    successors: %bb.2(0x80000000)

    %24:_(s1) = G_CONSTANT i1 false
    %28:_(s32) = G_CONSTANT i32 1000
    G_STORE %28(s32), %10(p0) :: (store (s32))
    G_BR %bb.2

  bb.5:
    successors: %bb.6(0x04000000), %bb.1(0x7c000000)

    %29:_(s1) = G_CONSTANT i1 true
    %19:_(s1) = G_XOR %18, %29
    %30:_(s32) = G_UITOFP %16(s32)
    %31:_(s1) = G_FCMP floatpred(ogt), %30(s32), %0
    %32:_(s32) = G_CONSTANT i32 1
    %17:_(s32) = G_ADD %16, %32
    %15:sreg_32_xm0_xexec(s32) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.if.break), %31(s1), %14(s32)
    SI_LOOP %15(s32), %bb.1, implicit-def $exec, implicit-def $scc, implicit $exec
    G_BR %bb.6

  bb.6:
    %33:_(s1) = G_PHI %19(s1), %bb.5
    %34:_(s32) = G_PHI %15(s32), %bb.5
    G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.end.cf), %34(s32)
    %35:_(s32) = G_FCONSTANT float 0.000000e+00
    %36:_(s32) = G_FCONSTANT float 1.000000e+00
    %37:_(s32) = G_SELECT %33(s1), %36, %35
    G_STORE %37(s32), %4(p0) :: (store (s32))
    SI_RETURN
...

# Compute-shader entry reduced from a larger case: an i1 phi (%31) whose
# incoming values are a constant-true from bb.0 and a constant-false from the
# loop exit bb.4; NOTE(review): attribute #0's 1,1 work-group size presumably
# makes some of this control flow uniform — confirm against the pass.
# CHECK lines are autogenerated — regenerate with update_mir_test_checks.py.
---
name: _amdgpu_cs_main
legalized: true
tracksRegLiveness: true
body: |
  ; GFX10-LABEL: name: _amdgpu_cs_main
  ; GFX10: bb.0:
  ; GFX10-NEXT: successors: %bb.1(0x40000000), %bb.2(0x40000000)
  ; GFX10-NEXT: liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[COPY:%[0-9]+]]:_(s32) = COPY $sgpr0
  ; GFX10-NEXT: [[COPY1:%[0-9]+]]:_(s32) = COPY $sgpr1
  ; GFX10-NEXT: [[COPY2:%[0-9]+]]:_(s32) = COPY $vgpr0
  ; GFX10-NEXT: [[DEF:%[0-9]+]]:_(s32) = G_IMPLICIT_DEF
  ; GFX10-NEXT: [[INT:%[0-9]+]]:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.s.getpc)
  ; GFX10-NEXT: [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 -4294967296
  ; GFX10-NEXT: [[AND:%[0-9]+]]:_(s64) = G_AND [[INT]], [[C]]
  ; GFX10-NEXT: [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[COPY]](s32)
  ; GFX10-NEXT: [[OR:%[0-9]+]]:_(s64) = G_OR [[AND]], [[ZEXT]]
  ; GFX10-NEXT: [[INTTOPTR:%[0-9]+]]:_(p4) = G_INTTOPTR [[OR]](s64)
  ; GFX10-NEXT: [[LOAD:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[INTTOPTR]](p4) :: (load (<8 x s32>))
  ; GFX10-NEXT: [[BITCAST:%[0-9]+]]:_(s256) = G_BITCAST [[LOAD]](<8 x s32>)
  ; GFX10-NEXT: [[TRUNC:%[0-9]+]]:_(s128) = G_TRUNC [[BITCAST]](s256)
  ; GFX10-NEXT: [[BITCAST1:%[0-9]+]]:_(<4 x s32>) = G_BITCAST [[TRUNC]](s128)
  ; GFX10-NEXT: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
  ; GFX10-NEXT: [[INT1:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.mbcnt.lo), [[C2]](s32), [[C1]](s32)
  ; GFX10-NEXT: [[INT2:%[0-9]+]]:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.mbcnt.hi), [[C2]](s32), [[INT1]](s32)
  ; GFX10-NEXT: [[FREEZE:%[0-9]+]]:_(s32) = G_FREEZE [[INT2]]
  ; GFX10-NEXT: [[C3:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; GFX10-NEXT: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[FREEZE]], [[C3]](s32)
  ; GFX10-NEXT: [[AMDGPU_BUFFER_LOAD:%[0-9]+]]:_(s32) = G_AMDGPU_BUFFER_LOAD [[BITCAST1]](<4 x s32>), [[C1]](s32), [[SHL]], [[C1]], 0, 0, 0 :: (load (s32), align 1, addrspace 8)
  ; GFX10-NEXT: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[AMDGPU_BUFFER_LOAD]](s32), [[C1]]
  ; GFX10-NEXT: [[C4:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
  ; GFX10-NEXT: [[AND1:%[0-9]+]]:_(s32) = G_AND [[FREEZE]], [[C4]]
  ; GFX10-NEXT: [[TRUNC1:%[0-9]+]]:_(s1) = G_TRUNC [[AND1]](s32)
  ; GFX10-NEXT: [[C5:%[0-9]+]]:_(s1) = G_CONSTANT i1 true
  ; GFX10-NEXT: [[XOR:%[0-9]+]]:_(s1) = G_XOR [[TRUNC1]], [[C5]]
  ; GFX10-NEXT: G_BRCOND [[XOR]](s1), %bb.2
  ; GFX10-NEXT: G_BR %bb.1
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.1:
  ; GFX10-NEXT: successors: %bb.3(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[C6:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: G_BR %bb.3
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.2:
  ; GFX10-NEXT: successors: %bb.5(0x40000000), %bb.6(0x40000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI:%[0-9]+]]:_(s32) = G_PHI %30(s32), %bb.4, [[DEF]](s32), %bb.0
  ; GFX10-NEXT: [[PHI1:%[0-9]+]]:_(s1) = G_PHI %32(s1), %bb.4, [[C5]](s1), %bb.0
  ; GFX10-NEXT: G_BRCOND [[PHI1]](s1), %bb.5
  ; GFX10-NEXT: G_BR %bb.6
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.3:
  ; GFX10-NEXT: successors: %bb.4(0x04000000), %bb.3(0x7c000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI2:%[0-9]+]]:_(s32) = G_PHI %34(s32), %bb.3, [[C6]](s32), %bb.1
  ; GFX10-NEXT: [[PHI3:%[0-9]+]]:_(s32) = G_PHI %36(s32), %bb.3, [[FREEZE]](s32), %bb.1
  ; GFX10-NEXT: [[PHI4:%[0-9]+]]:_(s32) = G_PHI %38(s32), %bb.3, [[C6]](s32), %bb.1
  ; GFX10-NEXT: [[C7:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: [[AMDGPU_BUFFER_LOAD1:%[0-9]+]]:_(s32) = G_AMDGPU_BUFFER_LOAD [[BITCAST1]](<4 x s32>), [[C7]](s32), [[PHI2]], [[C7]], 0, 0, 0 :: (load (s32), align 1, addrspace 8)
  ; GFX10-NEXT: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[AMDGPU_BUFFER_LOAD1]], [[PHI4]]
  ; GFX10-NEXT: [[C8:%[0-9]+]]:_(s32) = G_CONSTANT i32 -1
  ; GFX10-NEXT: [[ADD1:%[0-9]+]]:_(s32) = G_ADD [[PHI3]], [[C8]]
  ; GFX10-NEXT: [[C9:%[0-9]+]]:_(s32) = G_CONSTANT i32 4
  ; GFX10-NEXT: [[ADD2:%[0-9]+]]:_(s32) = G_ADD [[PHI2]], [[C9]]
  ; GFX10-NEXT: [[ICMP1:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[ADD1]](s32), [[C7]]
  ; GFX10-NEXT: G_BRCOND [[ICMP1]](s1), %bb.3
  ; GFX10-NEXT: G_BR %bb.4
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.4:
  ; GFX10-NEXT: successors: %bb.2(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[PHI5:%[0-9]+]]:_(s32) = G_PHI [[ADD]](s32), %bb.3
  ; GFX10-NEXT: [[C10:%[0-9]+]]:_(s1) = G_CONSTANT i1 false
  ; GFX10-NEXT: [[ICMP2:%[0-9]+]]:_(s1) = G_ICMP intpred(eq), [[PHI5]](s32), [[AMDGPU_BUFFER_LOAD]]
  ; GFX10-NEXT: [[OR1:%[0-9]+]]:_(s1) = G_OR [[ICMP]], [[ICMP2]]
  ; GFX10-NEXT: [[ZEXT1:%[0-9]+]]:_(s32) = G_ZEXT [[OR1]](s1)
  ; GFX10-NEXT: G_BR %bb.2
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.5:
  ; GFX10-NEXT: successors: %bb.6(0x80000000)
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: [[ZEXT2:%[0-9]+]]:_(s32) = G_ZEXT [[ICMP]](s1)
  ; GFX10-NEXT: [[C11:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; GFX10-NEXT: [[OR2:%[0-9]+]]:_(s32) = G_OR [[ZEXT2]], [[C11]]
  ; GFX10-NEXT: {{ $}}
  ; GFX10-NEXT: bb.6:
  ; GFX10-NEXT: [[PHI6:%[0-9]+]]:_(s32) = G_PHI [[PHI]](s32), %bb.2, [[OR2]](s32), %bb.5
  ; GFX10-NEXT: [[UV:%[0-9]+]]:_(<4 x s32>), [[UV1:%[0-9]+]]:_(<4 x s32>) = G_UNMERGE_VALUES [[LOAD]](<8 x s32>)
  ; GFX10-NEXT: [[ADD3:%[0-9]+]]:_(s32) = G_ADD [[COPY2]], [[COPY1]]
  ; GFX10-NEXT: [[C12:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
  ; GFX10-NEXT: [[SHL1:%[0-9]+]]:_(s32) = G_SHL [[ADD3]], [[C12]](s32)
  ; GFX10-NEXT: [[C13:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
  ; GFX10-NEXT: G_AMDGPU_BUFFER_STORE [[PHI6]](s32), [[UV1]](<4 x s32>), [[C13]](s32), [[SHL1]], [[C13]], 0, 0, 0 :: (store (s32), align 1, addrspace 8)
  ; GFX10-NEXT: S_ENDPGM 0
  bb.0:
    successors: %bb.1(0x40000000), %bb.2(0x40000000)
    liveins: $sgpr0, $sgpr1, $sgpr2, $sgpr3, $vgpr0, $vgpr1, $vgpr2

    %0:_(s32) = COPY $sgpr0
    %1:_(s32) = COPY $sgpr1
    %2:_(s32) = COPY $vgpr0
    %3:_(s32) = G_IMPLICIT_DEF
    %4:_(s64) = G_INTRINSIC intrinsic(@llvm.amdgcn.s.getpc)
    %5:_(s64) = G_CONSTANT i64 -4294967296
    %6:_(s64) = G_AND %4, %5
    %7:_(s64) = G_ZEXT %0(s32)
    %8:_(s64) = G_OR %6, %7
    %9:_(p4) = G_INTTOPTR %8(s64)
    %10:_(<8 x s32>) = G_LOAD %9(p4) :: (load (<8 x s32>))
    %11:_(s256) = G_BITCAST %10(<8 x s32>)
    %12:_(s128) = G_TRUNC %11(s256)
    %13:_(<4 x s32>) = G_BITCAST %12(s128)
    %15:_(s32) = G_CONSTANT i32 0
    %14:_(s32) = G_CONSTANT i32 -1
    %16:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.mbcnt.lo), %14(s32), %15(s32)
    %17:_(s32) = G_INTRINSIC intrinsic(@llvm.amdgcn.mbcnt.hi), %14(s32), %16(s32)
    %18:_(s32) = G_FREEZE %17
    %19:_(s32) = G_CONSTANT i32 2
    %20:_(s32) = G_SHL %18, %19(s32)
    %21:_(s32) = G_AMDGPU_BUFFER_LOAD %13(<4 x s32>), %15(s32), %20, %15, 0, 0, 0 :: (load (s32), align 1, addrspace 8)
    %22:_(s1) = G_ICMP intpred(eq), %21(s32), %15
    %23:_(s32) = G_CONSTANT i32 1
    %24:_(s32) = G_AND %18, %23
    %25:_(s1) = G_TRUNC %24(s32)
    %26:_(s1) = G_CONSTANT i1 true
    %27:_(s1) = G_XOR %25, %26
    G_BRCOND %27(s1), %bb.2
    G_BR %bb.1

  bb.1:
    successors: %bb.3(0x80000000)

    %28:_(s32) = G_CONSTANT i32 0
    G_BR %bb.3

  bb.2:
    successors: %bb.5(0x40000000), %bb.6(0x40000000)

    ; i1 phi under test: true from bb.0, false (%32) from the loop exit bb.4
    %29:_(s32) = G_PHI %30(s32), %bb.4, %3(s32), %bb.0
    %31:_(s1) = G_PHI %32(s1), %bb.4, %26(s1), %bb.0
    G_BRCOND %31(s1), %bb.5
    G_BR %bb.6

  bb.3:
    successors: %bb.4(0x04000000), %bb.3(0x7c000000)

    %33:_(s32) = G_PHI %34(s32), %bb.3, %28(s32), %bb.1
    %35:_(s32) = G_PHI %36(s32), %bb.3, %18(s32), %bb.1
    %37:_(s32) = G_PHI %38(s32), %bb.3, %28(s32), %bb.1
    %39:_(s32) = G_CONSTANT i32 0
    %40:_(s32) = G_AMDGPU_BUFFER_LOAD %13(<4 x s32>), %39(s32), %33, %39, 0, 0, 0 :: (load (s32), align 1, addrspace 8)
    %38:_(s32) = G_ADD %40, %37
    %41:_(s32) = G_CONSTANT i32 -1
    %36:_(s32) = G_ADD %35, %41
    %42:_(s32) = G_CONSTANT i32 4
    %34:_(s32) = G_ADD %33, %42
    %43:_(s1) = G_ICMP intpred(ne), %36(s32), %39
    G_BRCOND %43(s1), %bb.3
    G_BR %bb.4

  bb.4:
    successors: %bb.2(0x80000000)

    %44:_(s32) = G_PHI %38(s32), %bb.3
    %32:_(s1) = G_CONSTANT i1 false
    %45:_(s1) = G_ICMP intpred(eq), %44(s32), %21
    %46:_(s1) = G_OR %22, %45
    %30:_(s32) = G_ZEXT %46(s1)
    G_BR %bb.2

  bb.5:
    successors: %bb.6(0x80000000)

    %47:_(s32) = G_ZEXT %22(s1)
    %48:_(s32) = G_CONSTANT i32 2
    %49:_(s32) = G_OR %47, %48

  bb.6:
    %50:_(s32) = G_PHI %29(s32), %bb.2, %49(s32), %bb.5
    %51:_(<4 x s32>), %52:_(<4 x s32>) = G_UNMERGE_VALUES %10(<8 x s32>)
    %53:_(s32) = G_ADD %2, %1
    %54:_(s32) = G_CONSTANT i32 2
    %55:_(s32) = G_SHL %53, %54(s32)
    %56:_(s32) = G_CONSTANT i32 0
    G_AMDGPU_BUFFER_STORE %50(s32), %52(<4 x s32>), %56(s32), %55, %56, 0, 0, 0 :: (store (s32), align 1, addrspace 8)
    S_ENDPGM 0
...