# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
|
|
# RUN: llc -mtriple=aarch64 -run-pass=aarch64-postlegalizer-lowering -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=LOWER
|
|
# RUN: llc -mtriple=aarch64 -global-isel -start-before=aarch64-postlegalizer-lowering -stop-after=instruction-select -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=SELECT
|
|
#
|
|
# Check that we swap the order of operands on comparisons when it is likely
|
|
# to introduce a folding opportunity.
|
|
#
|
|
# The condition code for the compare should be changed when appropriate.
|
|
#
|
|
# TODO: emitBinOp doesn't know about selectArithExtendedRegister, so some of
|
|
# these cases don't hit in selection yet.
|
|
|
|
...
|
|
---
|
|
name: swap_sextinreg_lhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; LOWER-LABEL: name: swap_sextinreg_lhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %reg:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_sextinreg_lhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %reg:gpr64all = COPY $x0
|
|
; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg.sub_32
|
|
; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
|
|
; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
|
|
; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 32, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%reg:_(s64) = COPY $x0
|
|
%cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_more_than_one_use
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; The LHS of the compare is used in an add, and a second compare. Don't
|
|
; swap, since we don't gain any folding opportunities here.
|
|
|
|
; LOWER-LABEL: name: dont_swap_more_than_one_use
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %reg0:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg0, 8
|
|
; LOWER-NEXT: %add:_(s64) = G_ADD %cmp_lhs, %reg0
|
|
; LOWER-NEXT: %cmp2:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %add
|
|
; LOWER-NEXT: $w0 = COPY %cmp2(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_more_than_one_use
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
|
|
; SELECT-NEXT: %cmp_lhs:gpr64 = SBFMXri %reg0, 0, 7
|
|
; SELECT-NEXT: %add:gpr64 = ADDXrr %cmp_lhs, %reg0
|
|
; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %add, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp2:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp2
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%reg0:_(s64) = COPY $x0
|
|
%cmp_lhs:_(s64) = G_SEXT_INREG %reg0, 8
|
|
%reg1:_(s64) = COPY $x1
|
|
%cmp1:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %reg1
|
|
|
|
%add:_(s64) = G_ADD %cmp_lhs(s64), %reg0
|
|
%cmp2:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %add
|
|
|
|
$w0 = COPY %cmp2(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_legal_arith_immed_on_rhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; Arithmetic immediates can be folded into compares. If we have one, then
|
|
; don't bother changing anything.
|
|
|
|
; LOWER-LABEL: name: dont_swap_legal_arith_immed_on_rhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %reg:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = G_CONSTANT i64 12
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_legal_arith_immed_on_rhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %reg:gpr64 = COPY $x0
|
|
; SELECT-NEXT: %cmp_lhs:gpr64common = SBFMXri %reg, 0, 7
|
|
; SELECT-NEXT: [[SUBSXri:%[0-9]+]]:gpr64 = SUBSXri %cmp_lhs, 12, 0, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%reg:_(s64) = COPY $x0
|
|
%cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
|
|
%cmp_rhs:_(s64) = G_CONSTANT i64 12
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_non_arith_immed_on_rhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; If we have a non-arithmetic immediate on the rhs, then we can swap to get
|
|
; a guaranteed folding opportunity.
|
|
|
|
; LOWER-LABEL: name: swap_non_arith_immed_on_rhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %reg:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = G_CONSTANT i64 1234567
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_non_arith_immed_on_rhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %reg:gpr64all = COPY $x0
|
|
; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg.sub_32
|
|
; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
|
|
; SELECT-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1234567
|
|
; SELECT-NEXT: %cmp_rhs:gpr64sp = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
|
|
; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 32, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%reg:_(s64) = COPY $x0
|
|
%cmp_lhs:_(s64) = G_SEXT_INREG %reg, 8
|
|
%cmp_rhs:_(s64) = G_CONSTANT i64 1234567
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_and_lhs_0xFF
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; LOWER-LABEL: name: swap_and_lhs_0xFF
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 255
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_and_lhs_0xFF
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
|
|
; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
|
|
; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
|
|
; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
|
|
; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 0, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%and_lhs:_(s64) = COPY $x0
|
|
%cst:_(s64) = G_CONSTANT i64 255
|
|
%cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_and_lhs_0xFFFF
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; LOWER-LABEL: name: swap_and_lhs_0xFFFF
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 65535
|
|
; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_and_lhs_0xFFFF
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
|
|
; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
|
|
; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
|
|
; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
|
|
; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 8, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%cst:_(s64) = G_CONSTANT i64 65535
|
|
%and_lhs:_(s64) = COPY $x0
|
|
%cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_and_lhs_0xFFFFFFFF
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; LOWER-LABEL: name: swap_and_lhs_0xFFFFFFFF
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 4294967295
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %cst
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sle), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_and_lhs_0xFFFFFFFF
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64sp = COPY $x1
|
|
; SELECT-NEXT: %and_lhs:gpr64all = COPY $x0
|
|
; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %and_lhs.sub_32
|
|
; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
|
|
; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 16, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 12, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%and_lhs:_(s64) = COPY $x0
|
|
%cst:_(s64) = G_CONSTANT i64 4294967295
|
|
%cmp_lhs:_(s64) = G_AND %and_lhs, %cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_and_lhs_wrong_mask
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; 7 isn't an extend mask for G_AND, so there's no folding opportunities
|
|
; here.
|
|
;
|
|
; LOWER-LABEL: name: dont_swap_and_lhs_wrong_mask
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %and_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %not_an_extend_mask:_(s64) = G_CONSTANT i64 7
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_AND %and_lhs, %not_an_extend_mask
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_and_lhs_wrong_mask
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %and_lhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: %cmp_lhs:gpr64common = ANDXri %and_lhs, 4098
|
|
; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 11, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%and_lhs:_(s64) = COPY $x0
|
|
%not_an_extend_mask:_(s64) = G_CONSTANT i64 7
|
|
%cmp_lhs:_(s64) = G_AND %and_lhs, %not_an_extend_mask(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(sge), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_shl_lhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; LOWER-LABEL: name: swap_shl_lhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_shl_lhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %shl_lhs, 1, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%shl_lhs:_(s64) = COPY $x0
|
|
%cst:_(s64) = G_CONSTANT i64 1
|
|
%cmp_lhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_ashr_lhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; LOWER-LABEL: name: swap_ashr_lhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %ashr_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_ASHR %ashr_lhs, %cst(s64)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_ashr_lhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %ashr_lhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %ashr_lhs, 129, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%ashr_lhs:_(s64) = COPY $x0
|
|
%cst:_(s64) = G_CONSTANT i64 1
|
|
%cmp_lhs:_(s64) = G_ASHR %ashr_lhs, %cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_lshr_lhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; LOWER-LABEL: name: swap_lshr_lhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %lshr_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 1
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_LSHR %lshr_lhs, %cst(s64)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(sgt), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_lshr_lhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %lshr_lhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_rhs, %lshr_lhs, 65, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 13, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%lshr_lhs:_(s64) = COPY $x0
|
|
%cst:_(s64) = G_CONSTANT i64 1
|
|
%cmp_lhs:_(s64) = G_LSHR %lshr_lhs, %cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_shift_s64_cst_too_large
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; Constant for the shift must be <= 63.
|
|
|
|
; LOWER-LABEL: name: dont_swap_shift_s64_cst_too_large
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %too_large:_(s64) = G_CONSTANT i64 64
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %shl_lhs, %too_large(s64)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_shift_s64_cst_too_large
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 64
|
|
; SELECT-NEXT: %too_large:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32
|
|
; SELECT-NEXT: %cmp_lhs:gpr64 = LSLVXr %shl_lhs, %too_large
|
|
; SELECT-NEXT: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%shl_lhs:_(s64) = COPY $x0
|
|
%too_large:_(s64) = G_CONSTANT i64 64
|
|
%cmp_lhs:_(s64) = G_SHL %shl_lhs, %too_large(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
|
|
...
|
|
---
|
|
name: dont_swap_shift_s32_cst_too_large
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $w0, $w1
|
|
|
|
; Constant for the shift must be <= 31.
|
|
|
|
; LOWER-LABEL: name: dont_swap_shift_s32_cst_too_large
|
|
; LOWER: liveins: $w0, $w1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s32) = COPY $w1
|
|
; LOWER-NEXT: %shl_lhs:_(s32) = COPY $w0
|
|
; LOWER-NEXT: %cst:_(s32) = G_CONSTANT i32 32
|
|
; LOWER-NEXT: %cmp_lhs:_(s32) = G_SHL %shl_lhs, %cst(s32)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s32), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_shift_s32_cst_too_large
|
|
; SELECT: liveins: $w0, $w1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr32 = COPY $w1
|
|
; SELECT-NEXT: %shl_lhs:gpr32 = COPY $w0
|
|
; SELECT-NEXT: %cst:gpr32 = MOVi32imm 32
|
|
; SELECT-NEXT: %cmp_lhs:gpr32 = LSLVWr %shl_lhs, %cst
|
|
; SELECT-NEXT: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr %cmp_lhs, %cmp_rhs, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s32) = COPY $w1
|
|
|
|
%shl_lhs:_(s32) = COPY $w0
|
|
%cst:_(s32) = G_CONSTANT i32 32
|
|
%cmp_lhs:_(s32) = G_SHL %shl_lhs, %cst(s32)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s32), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_cmn_lhs_no_folding_opportunities
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0.entry:
|
|
liveins: $x0, $x1
|
|
|
|
; No reason to swap a CMN on the LHS when it won't introduce a constant
|
|
; folding opportunity. We can recognise CMNs on the LHS and RHS, so there's
|
|
; nothing to gain here.
|
|
|
|
; LOWER-LABEL: name: dont_swap_cmn_lhs_no_folding_opportunities
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %sub_rhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_cmn_lhs_no_folding_opportunities
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %sub_rhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %sub_rhs, %cmp_rhs, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%sub_rhs:_(s64) = COPY $x0
|
|
%zero:_(s64) = G_CONSTANT i64 0
|
|
%cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: swap_cmn_lhs
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0.entry:
|
|
liveins: $x0, $x1
|
|
|
|
; Swap when we can see a constant folding opportunity through the sub on
|
|
; the LHS.
|
|
|
|
|
|
; LOWER-LABEL: name: swap_cmn_lhs
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %shl_lhs:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
|
|
; LOWER-NEXT: %cst:_(s64) = G_CONSTANT i64 63
|
|
; LOWER-NEXT: %sub_rhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(ne), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_cmn_lhs
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %shl_lhs:gpr64 = COPY $x0
|
|
; SELECT-NEXT: [[ADDSXrs:%[0-9]+]]:gpr64 = ADDSXrs %cmp_rhs, %shl_lhs, 63, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 0, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%cmp_rhs:_(s64) = COPY $x1
|
|
|
|
%shl_lhs:_(s64) = COPY $x0
|
|
%zero:_(s64) = G_CONSTANT i64 0
|
|
%cst:_(s64) = G_CONSTANT i64 63
|
|
%sub_rhs:_(s64) = G_SHL %shl_lhs, %cst(s64)
|
|
%cmp_lhs:_(s64) = G_SUB %zero, %sub_rhs
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(ne), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_cmn_lhs_when_rhs_more_profitable
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0.entry:
|
|
liveins: $x0, $x1
|
|
|
|
; Don't swap when the RHS's subtract offers a better constant folding
|
|
; opportunity than the LHS's subtract.
|
|
;
|
|
; In this case, the RHS has a supported extend, plus a shift with a constant
|
|
; <= 4.
|
|
|
|
; LOWER-LABEL: name: dont_swap_cmn_lhs_when_rhs_more_profitable
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %zero:_(s64) = G_CONSTANT i64 0
|
|
; LOWER-NEXT: %reg0:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %shl_cst:_(s64) = G_CONSTANT i64 63
|
|
; LOWER-NEXT: %shl:_(s64) = G_SHL %reg0, %shl_cst(s64)
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SUB %zero, %shl
|
|
; LOWER-NEXT: %reg1:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %sext_in_reg:_(s64) = G_SEXT_INREG %reg1, 1
|
|
; LOWER-NEXT: %ashr_cst:_(s64) = G_CONSTANT i64 3
|
|
; LOWER-NEXT: %ashr:_(s64) = G_ASHR %sext_in_reg, %ashr_cst(s64)
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = G_SUB %zero, %ashr
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_cmn_lhs_when_rhs_more_profitable
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %zero:gpr64 = COPY $xzr
|
|
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
|
|
; SELECT-NEXT: %shl:gpr64 = UBFMXri %reg0, 1, 0
|
|
; SELECT-NEXT: %reg1:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %sext_in_reg:gpr64 = SBFMXri %reg1, 0, 0
|
|
; SELECT-NEXT: %cmp_rhs:gpr64 = SUBSXrs %zero, %sext_in_reg, 131, implicit-def dead $nzcv
|
|
; SELECT-NEXT: [[ADDSXrr:%[0-9]+]]:gpr64 = ADDSXrr %shl, %cmp_rhs, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%zero:_(s64) = G_CONSTANT i64 0
|
|
|
|
%reg0:_(s64) = COPY $x0
|
|
%shl_cst:_(s64) = G_CONSTANT i64 63
|
|
%shl:_(s64) = G_SHL %reg0, %shl_cst(s64)
|
|
%cmp_lhs:_(s64) = G_SUB %zero, %shl
|
|
|
|
%reg1:_(s64) = COPY $x1
|
|
%sext_in_reg:_(s64) = G_SEXT_INREG %reg1, 1
|
|
%ashr_cst:_(s64) = G_CONSTANT i64 3
|
|
%ashr:_(s64) = G_ASHR %sext_in_reg, %ashr_cst(s64)
|
|
%cmp_rhs:_(s64) = G_SUB %zero, %ashr
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(eq), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
...
|
|
---
|
|
name: dont_swap_rhs_with_supported_extend
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
; The RHS offers more constant folding opportunities than the LHS.
|
|
|
|
; LOWER-LABEL: name: dont_swap_rhs_with_supported_extend
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %reg0:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %lhs_cst:_(s64) = G_CONSTANT i64 1
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %reg0, %lhs_cst(s64)
|
|
; LOWER-NEXT: %reg1:_(s64) = COPY $x1
|
|
; LOWER-NEXT: %and_mask:_(s64) = G_CONSTANT i64 255
|
|
; LOWER-NEXT: %and:_(s64) = G_AND %reg1, %and_mask
|
|
; LOWER-NEXT: %rhs_cst:_(s64) = G_CONSTANT i64 1
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: dont_swap_rhs_with_supported_extend
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
|
|
; SELECT-NEXT: %cmp_lhs:gpr64 = UBFMXri %reg0, 63, 62
|
|
; SELECT-NEXT: %reg1:gpr64 = COPY $x1
|
|
; SELECT-NEXT: %and:gpr64common = ANDXri %reg1, 4103
|
|
; SELECT-NEXT: [[SUBSXrs:%[0-9]+]]:gpr64 = SUBSXrs %cmp_lhs, %and, 129, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%reg0:_(s64) = COPY $x0
|
|
%lhs_cst:_(s64) = G_CONSTANT i64 1
|
|
%cmp_lhs:_(s64) = G_SHL %reg0, %lhs_cst(s64)
|
|
|
|
%reg1:_(s64) = COPY $x1
|
|
%and_mask:_(s64) = G_CONSTANT i64 255
|
|
%and:_(s64) = G_AND %reg1, %and_mask(s64)
|
|
%rhs_cst:_(s64) = G_CONSTANT i64 1
|
|
%cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(slt), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|
|
|
|
|
|
...
|
|
---
|
|
name: swap_rhs_with_supported_extend
|
|
legalized: true
|
|
tracksRegLiveness: true
|
|
body: |
|
|
bb.0:
|
|
liveins: $x0, $x1
|
|
|
|
; In this case, both the LHS and RHS are fed by a supported extend. However,
|
|
; the LHS' shift has a constant <= 4. This makes it more profitable, so
|
|
; we should swap the operands.
|
|
|
|
; LOWER-LABEL: name: swap_rhs_with_supported_extend
|
|
; LOWER: liveins: $x0, $x1
|
|
; LOWER-NEXT: {{ $}}
|
|
; LOWER-NEXT: %reg0:_(s64) = COPY $x0
|
|
; LOWER-NEXT: %and_mask:_(s64) = G_CONSTANT i64 255
|
|
; LOWER-NEXT: %and:_(s64) = G_AND %reg0, %and_mask
|
|
; LOWER-NEXT: %lhs_cst:_(s64) = G_CONSTANT i64 1
|
|
; LOWER-NEXT: %cmp_lhs:_(s64) = G_SHL %and, %lhs_cst(s64)
|
|
; LOWER-NEXT: %rhs_cst:_(s64) = G_CONSTANT i64 5
|
|
; LOWER-NEXT: %cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
|
|
; LOWER-NEXT: %cmp:_(s32) = G_ICMP intpred(slt), %cmp_rhs(s64), %cmp_lhs
|
|
; LOWER-NEXT: $w0 = COPY %cmp(s32)
|
|
; LOWER-NEXT: RET_ReallyLR implicit $w0
|
|
;
|
|
; SELECT-LABEL: name: swap_rhs_with_supported_extend
|
|
; SELECT: liveins: $x0, $x1
|
|
; SELECT-NEXT: {{ $}}
|
|
; SELECT-NEXT: %reg0:gpr64 = COPY $x0
|
|
; SELECT-NEXT: %and:gpr64common = ANDXri %reg0, 4103
|
|
; SELECT-NEXT: [[COPY:%[0-9]+]]:gpr32all = COPY %reg0.sub_32
|
|
; SELECT-NEXT: [[COPY1:%[0-9]+]]:gpr32 = COPY [[COPY]]
|
|
; SELECT-NEXT: %cmp_rhs:gpr64common = SBFMXri %and, 5, 63
|
|
; SELECT-NEXT: [[SUBSXrx:%[0-9]+]]:gpr64 = SUBSXrx %cmp_rhs, [[COPY1]], 1, implicit-def $nzcv
|
|
; SELECT-NEXT: %cmp:gpr32 = CSINCWr $wzr, $wzr, 10, implicit $nzcv
|
|
; SELECT-NEXT: $w0 = COPY %cmp
|
|
; SELECT-NEXT: RET_ReallyLR implicit $w0
|
|
%reg0:_(s64) = COPY $x0
|
|
%and_mask:_(s64) = G_CONSTANT i64 255
|
|
%and:_(s64) = G_AND %reg0, %and_mask(s64)
|
|
|
|
%lhs_cst:_(s64) = G_CONSTANT i64 1
|
|
%cmp_lhs:_(s64) = G_SHL %and, %lhs_cst(s64)
|
|
|
|
%rhs_cst:_(s64) = G_CONSTANT i64 5
|
|
%cmp_rhs:_(s64) = G_ASHR %and, %rhs_cst(s64)
|
|
|
|
%cmp:_(s32) = G_ICMP intpred(sgt), %cmp_lhs(s64), %cmp_rhs
|
|
$w0 = COPY %cmp(s32)
|
|
RET_ReallyLR implicit $w0
|