# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=gfx1100 -mattr="+wavefrontsize64" -run-pass=instruction-select -verify-machineinstrs -global-isel %s -o - | FileCheck %s
---
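# llvm.amdgcn.fcmp with condition code 0 (FCMP_FALSE) on f16 operands produced by
# G_FPTRUNC; the CHECK lines expect V_CMP_F_F16_t16_e64 to be selected.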
name: fcmp_false_f16
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: fcmp_false_f16
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK-NEXT: [[V_CVT_F16_F32_t16_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CVT_F16_F32_t16_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CMP_F_F16_t16_e64_:%[0-9]+]]:sreg_64 = V_CMP_F_F16_t16_e64 0, [[V_CVT_F16_F32_t16_e64_]], 0, [[V_CVT_F16_F32_t16_e64_1]], 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_CMP_F_F16_t16_e64_]]
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %2:vgpr(s16) = G_FPTRUNC %0
    %3:vgpr(s16) = G_FPTRUNC %1
    %4:sgpr(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %2, %3, 0
    S_ENDPGM 0, implicit %4
...
---
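# Same as above with condition code 15 (FCMP_TRUE); expects V_CMP_TRU_F16_t16_e64.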
name: fcmp_true_f16
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: fcmp_true_f16
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK-NEXT: [[V_CVT_F16_F32_t16_e64_:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CVT_F16_F32_t16_e64_1:%[0-9]+]]:vgpr_32 = nofpexcept V_CVT_F16_F32_t16_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CMP_TRU_F16_t16_e64_:%[0-9]+]]:sreg_64 = V_CMP_TRU_F16_t16_e64 0, [[V_CVT_F16_F32_t16_e64_]], 0, [[V_CVT_F16_F32_t16_e64_1]], 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_CMP_TRU_F16_t16_e64_]]
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %2:vgpr(s16) = G_FPTRUNC %0
    %3:vgpr(s16) = G_FPTRUNC %1
    %4:sgpr(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %2, %3, 15
    S_ENDPGM 0, implicit %4
...
---
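# Condition code 0 (FCMP_FALSE) on f32 operands; expects V_CMP_F_F32_e64.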
name: fcmp_false_f32
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: fcmp_false_f32
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK-NEXT: [[V_CMP_F_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_F_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_CMP_F_F32_e64_]]
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %4:sgpr(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %0, %1, 0
    S_ENDPGM 0, implicit %4
...
---
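# Condition code 15 (FCMP_TRUE) on f32 operands; expects V_CMP_TRU_F32_e64.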
name: fcmp_true_f32
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: fcmp_true_f32
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK-NEXT: [[V_CMP_TRU_F32_e64_:%[0-9]+]]:sreg_64 = V_CMP_TRU_F32_e64 0, [[COPY]], 0, [[COPY1]], 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_CMP_TRU_F32_e64_]]
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %4:sgpr(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %0, %1, 15
    S_ENDPGM 0, implicit %4
...
---
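# Condition code 0 (FCMP_FALSE) on f64 operands produced by G_FPEXT; expects V_CMP_F_F64_e64.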
name: fcmp_false_f64
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: fcmp_false_f64
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK-NEXT: [[V_CVT_F64_F32_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_CVT_F64_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CVT_F64_F32_e64_1:%[0-9]+]]:vreg_64 = nofpexcept V_CVT_F64_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CMP_F_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_F_F64_e64 0, [[V_CVT_F64_F32_e64_]], 0, [[V_CVT_F64_F32_e64_1]], 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_CMP_F_F64_e64_]]
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %2:vgpr(s64) = G_FPEXT %0
    %3:vgpr(s64) = G_FPEXT %1
    %4:sgpr(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %2, %3, 0
    S_ENDPGM 0, implicit %4
...
---
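# Condition code 15 (FCMP_TRUE) on f64 operands produced by G_FPEXT; expects V_CMP_TRU_F64_e64.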
name: fcmp_true_f64
legalized: true
regBankSelected: true
tracksRegLiveness: true

body: |
  bb.0:
    liveins: $vgpr0, $vgpr1

    ; CHECK-LABEL: name: fcmp_true_f64
    ; CHECK: liveins: $vgpr0, $vgpr1
    ; CHECK-NEXT: {{ $}}
    ; CHECK-NEXT: [[COPY:%[0-9]+]]:vgpr_32 = COPY $vgpr0
    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
    ; CHECK-NEXT: [[V_CVT_F64_F32_e64_:%[0-9]+]]:vreg_64 = nofpexcept V_CVT_F64_F32_e64 0, [[COPY]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CVT_F64_F32_e64_1:%[0-9]+]]:vreg_64 = nofpexcept V_CVT_F64_F32_e64 0, [[COPY1]], 0, 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: [[V_CMP_TRU_F64_e64_:%[0-9]+]]:sreg_64 = V_CMP_TRU_F64_e64 0, [[V_CVT_F64_F32_e64_]], 0, [[V_CVT_F64_F32_e64_1]], 0, implicit $mode, implicit $exec
    ; CHECK-NEXT: S_ENDPGM 0, implicit [[V_CMP_TRU_F64_e64_]]
    %0:vgpr(s32) = COPY $vgpr0
    %1:vgpr(s32) = COPY $vgpr1
    %2:vgpr(s64) = G_FPEXT %0
    %3:vgpr(s64) = G_FPEXT %1
    %4:sgpr(s64) = G_INTRINSIC_CONVERGENT intrinsic(@llvm.amdgcn.fcmp), %2, %3, 15
    S_ENDPGM 0, implicit %4
...