# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=aarch64 -run-pass=instruction-select -verify-machineinstrs %s -o - | FileCheck %s
...
---
name: uaddlv_v8s8
legalized: true
regBankSelected: true
body: |
  bb.0:
    liveins: $d0
    ; CHECK-LABEL: name: uaddlv_v8s8
    ; CHECK: %copy:fpr64 = COPY $d0
    ; CHECK: [[UADDLVv8i8v:%[0-9]+]]:fpr16 = UADDLVv8i8v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv8i8v]], %subreg.hsub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<8 x s8>) = COPY $d0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<8 x s8>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0

...
---
name: uaddlv_v16s8
legalized: true
regBankSelected: true
body: |
  bb.0:
    liveins: $q0

    ; CHECK-LABEL: name: uaddlv_v16s8
    ; CHECK: %copy:fpr128 = COPY $q0
    ; CHECK: [[UADDLVv16i8v:%[0-9]+]]:fpr16 = UADDLVv16i8v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv16i8v]], %subreg.hsub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<16 x s8>) = COPY $q0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<16 x s8>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0
...
---
name: uaddlv_v4s16
legalized: true
regBankSelected: true
body: |
  bb.0:
    liveins: $d0
    ; CHECK-LABEL: name: uaddlv_v4s16
    ; CHECK: %copy:fpr64 = COPY $d0
    ; CHECK: [[UADDLVv4i16v:%[0-9]+]]:fpr32 = UADDLVv4i16v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv4i16v]], %subreg.ssub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<4 x s16>) = COPY $d0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<4 x s16>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0

...
---
name: uaddlv_v8s16
legalized: true
regBankSelected: true
body: |
  bb.0:
    liveins: $q0

    ; CHECK-LABEL: name: uaddlv_v8s16
    ; CHECK: %copy:fpr128 = COPY $q0
    ; CHECK: [[UADDLVv8i16v:%[0-9]+]]:fpr32 = UADDLVv8i16v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv8i16v]], %subreg.ssub
    ; CHECK: %intrin:fpr32 = COPY [[INSERT_SUBREG]].ssub
    ; CHECK: $w0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $w0
    %copy:fpr(<8 x s16>) = COPY $q0
    %intrin:fpr(s32) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<8 x s16>)
    $w0 = COPY %intrin(s32)
    RET_ReallyLR implicit $w0

...
---
name: uaddlv_v4s32
legalized: true
regBankSelected: true
body: |
  bb.0:
    liveins: $q0

    ; CHECK-LABEL: name: uaddlv_v4s32
    ; CHECK: %copy:fpr128 = COPY $q0
    ; CHECK: [[UADDLVv4i32v:%[0-9]+]]:fpr64 = UADDLVv4i32v %copy
    ; CHECK: [[DEF:%[0-9]+]]:fpr128 = IMPLICIT_DEF
    ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:fpr128 = INSERT_SUBREG [[DEF]], [[UADDLVv4i32v]], %subreg.dsub
    ; CHECK: %intrin:fpr64 = COPY [[INSERT_SUBREG]].dsub
    ; CHECK: $x0 = COPY %intrin
    ; CHECK: RET_ReallyLR implicit $x0
    %copy:fpr(<4 x s32>) = COPY $q0
    %intrin:fpr(s64) = G_INTRINSIC intrinsic(@llvm.aarch64.neon.uaddlv), %copy(<4 x s32>)
    $x0 = COPY %intrin(s64)
    RET_ReallyLR implicit $x0