; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme < %s | FileCheck %s
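; The WHILE{GE,GT,HS,HI} intrinsics build a predicate from a decrementing
; signed (GE, GT) or unsigned (HS, HI) compare of two scalar counters. The
; tests below check that each intrinsic selects the matching SVE instruction
; for 32-bit (w) and 64-bit (x) operands, and that constant-operand calls are
; folded to PTRUE only when that is provably equivalent.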
;
; WHILEGE
;
define <vscale x 16 x i1> @whilege_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilege_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilege_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilege_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilege_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilege_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilege_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilege_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilege_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilege_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilege p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}
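; Constant-operand WHILEGE calls are expected to fold to PTRUE only when the
; implied element count maps onto a named predicate pattern (vl1-vl8, vl16,
; vl32, vl64, vl128, vl256), is known to fit in the vector (within the minimum
; vector length, or the exact length fixed by vscale_range), and the count can
; be computed without overflow. The tests below cover both the foldable and
; the non-foldable cases.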
define <vscale x 2 x i1> @whilege_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilege_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilege p0.d, x8, xzr
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64 3, i64 0)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilege_b_ii() {
; CHECK-LABEL: whilege_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl6
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 3, i32 -2)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilege p0.b, x8, xzr
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 9, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilege_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilege_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64 255, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #2
; CHECK-NEXT:    mov w9, #2147483647
; CHECK-NEXT:    movk w8, #32768, lsl #16
; CHECK-NEXT:    whilege p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 2147483647, i32 -2147483646)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilege_b_ii_dont_fold_to_ptrue_increment_overflow() {
; CHECK-LABEL: whilege_b_ii_dont_fold_to_ptrue_increment_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #2147483647
; CHECK-NEXT:    mov w9, #-2147483641
; CHECK-NEXT:    whilege p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32 -2147483641, i32 2147483647)
  ret <vscale x 16 x i1> %out
}
;
; WHILEHS
;

define <vscale x 16 x i1> @whilehs_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehs_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilehs_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilehs_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilehs_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilehs_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilehs_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehs_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilehs_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehs_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehs p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}
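; As for WHILEGE, the constant-operand WHILEHS tests check that the unsigned
; decrementing while folds to PTRUE only for representable, non-overflowing
; element counts that fit in the (minimum or exact) vector length.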
define <vscale x 2 x i1> @whilehs_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilehs_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilehs p0.d, x8, xzr
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64 3, i64 0)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilehs_b_ii() {
; CHECK-LABEL: whilehs_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl7
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 8, i64 2)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilehs p0.b, x8, xzr
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 9, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehs_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilehs_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64 255, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    mov w9, #6
; CHECK-NEXT:    whilehs p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 6, i32 4294967295)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehs_b_ii_dont_fold_to_ptrue_increment_overflow() {
; CHECK-LABEL: whilehs_b_ii_dont_fold_to_ptrue_increment_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    whilehs p0.b, w8, wzr
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32 4294967295, i32 0)
  ret <vscale x 16 x i1> %out
}
;
; WHILEGT
;

define <vscale x 16 x i1> @whilegt_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilegt_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilegt_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilegt_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilegt_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilegt_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilegt_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilegt_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilegt_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilegt_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilegt p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}
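; Constant-operand WHILEGT tests: as above, the fold to PTRUE is only valid
; for a named pattern that fits in the vector and whose element count does not
; overflow. The strict compare excludes the second operand from the count,
; hence vl5 for (3, -2) where WHILEGE gives vl6.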
define <vscale x 2 x i1> @whilegt_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilegt_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilegt p0.d, x8, xzr
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64 3, i64 0)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilegt_b_ii() {
; CHECK-LABEL: whilegt_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl5
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 3, i32 -2)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilegt_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilegt_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilegt p0.b, x8, xzr
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 9, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilegt_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilegt_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64 256, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilegt_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilegt_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #2147483647
; CHECK-NEXT:    mov w9, #-2147483641
; CHECK-NEXT:    whilegt p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32 -2147483641, i32 2147483647)
  ret <vscale x 16 x i1> %out
}
;
; WHILEHI
;

define <vscale x 16 x i1> @whilehi_b_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_b_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.b, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 %a, i32 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehi_b_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_b_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.b, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 %a, i64 %b)
  ret <vscale x 16 x i1> %out
}

define <vscale x 8 x i1> @whilehi_h_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_h_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.h, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32 %a, i32 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 8 x i1> @whilehi_h_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_h_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.h, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64 %a, i64 %b)
  ret <vscale x 8 x i1> %out
}

define <vscale x 4 x i1> @whilehi_s_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_s_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.s, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32 %a, i32 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 4 x i1> @whilehi_s_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_s_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.s, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64 %a, i64 %b)
  ret <vscale x 4 x i1> %out
}

define <vscale x 2 x i1> @whilehi_d_ww(i32 %a, i32 %b) {
; CHECK-LABEL: whilehi_d_ww:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.d, w0, w1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32 %a, i32 %b)
  ret <vscale x 2 x i1> %out
}

define <vscale x 2 x i1> @whilehi_d_xx(i64 %a, i64 %b) {
; CHECK-LABEL: whilehi_d_xx:
; CHECK:       // %bb.0:
; CHECK-NEXT:    whilehi p0.d, x0, x1
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 %a, i64 %b)
  ret <vscale x 2 x i1> %out
}
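; Constant-operand WHILEHI tests: the strict unsigned compare gives one fewer
; active element than WHILEHS for the same operands (vl6 for (8, 2) versus
; vl7), and the same restrictions on folding to PTRUE apply.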
define <vscale x 2 x i1> @whilehi_d_ii_dont_fold_to_ptrue_larger_than_minvec() {
; CHECK-LABEL: whilehi_d_ii_dont_fold_to_ptrue_larger_than_minvec:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov w8, #3
; CHECK-NEXT:    whilehi p0.d, x8, xzr
; CHECK-NEXT:    ret
  %out = call <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64 3, i64 0)
  ret <vscale x 2 x i1> %out
}

define <vscale x 16 x i1> @whilehi_b_ii() {
; CHECK-LABEL: whilehi_b_ii:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.b, vl6
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 8, i64 2)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehi_b_ii_dont_fold_to_ptrue_nonexistent_vl9() {
; CHECK-LABEL: whilehi_b_ii_dont_fold_to_ptrue_nonexistent_vl9:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #9
; CHECK-NEXT:    whilehi p0.b, x8, xzr
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 9, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehi_b_ii_vl_maximum() vscale_range(16, 16) {
; CHECK-LABEL: whilehi_b_ii_vl_maximum:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p0.b, vl256
; CHECK-NEXT:    ret
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64 256, i64 0)
  ret <vscale x 16 x i1> %out
}

define <vscale x 16 x i1> @whilehi_b_ii_dont_fold_to_ptrue_overflow() {
; CHECK-LABEL: whilehi_b_ii_dont_fold_to_ptrue_overflow:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov w8, #-1
; CHECK-NEXT:    mov w9, #7
; CHECK-NEXT:    whilehi p0.b, w9, w8
; CHECK-NEXT:    ret
entry:
  %out = call <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32 7, i32 4294967295)
  ret <vscale x 16 x i1> %out
}
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilege.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilege.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilege.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilege.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehs.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehs.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehs.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehs.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilegt.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilegt.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilegt.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilegt.nxv2i1.i64(i64, i64)

declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i32(i32, i32)
declare <vscale x 16 x i1> @llvm.aarch64.sve.whilehi.nxv16i1.i64(i64, i64)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i32(i32, i32)
declare <vscale x 8 x i1> @llvm.aarch64.sve.whilehi.nxv8i1.i64(i64, i64)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i32(i32, i32)
declare <vscale x 4 x i1> @llvm.aarch64.sve.whilehi.nxv4i1.i64(i64, i64)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i32(i32, i32)
declare <vscale x 2 x i1> @llvm.aarch64.sve.whilehi.nxv2i1.i64(i64, i64)