; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s

; Test lowering of the SME2 multi-vector saturating narrowing intrinsics
; (SQCVT, UQCVT, SQCVTU) in their x2 and x4 forms.

;
; SQCVT
;

; x2
define <vscale x 8 x i16> @multi_vector_qcvt_x2_s16_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_qcvt_x2_s16_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    sqcvt z0.h, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqcvt.x2.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2)
  ret <vscale x 8 x i16> %res
}

; x4
define <vscale x 16 x i8> @multi_vector_qcvt_x4_s8_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_qcvt_x4_s8_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqcvt z0.b, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqcvt.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_qcvt_x4_s16_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_qcvt_x4_s16_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqcvt z0.h, { z4.d - z7.d }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqcvt.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
  ret <vscale x 8 x i16> %res
}

;
; UQCVT
;

; x2
define <vscale x 8 x i16> @multi_vector_qcvt_x2_u16_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1) {
; CHECK-LABEL: multi_vector_qcvt_x2_u16_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    uqcvt z0.h, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqcvt.x2.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1)
  ret <vscale x 8 x i16> %res
}

; x4
define <vscale x 16 x i8> @multi_vector_qcvt_x4_u8_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_qcvt_x4_u8_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uqcvt z0.b, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.uqcvt.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_qcvt_x4_u16_u64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_qcvt_x4_u16_u64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    uqcvt z0.h, { z4.d - z7.d }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.uqcvt.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
  ret <vscale x 8 x i16> %res
}

;
; SQCVTU
;

; x2
define <vscale x 8 x i16> @multi_vector_qcvt_x2_s16_u32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2) {
; CHECK-LABEL: multi_vector_qcvt_x2_s16_u32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    sqcvtu z0.h, { z2.s, z3.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqcvtu.x2.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2)
  ret <vscale x 8 x i16> %res
}

; x4
define <vscale x 16 x i8> @multi_vector_qcvt_x4_u8_s32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4) {
; CHECK-LABEL: multi_vector_qcvt_x4_u8_s32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqcvtu z0.b, { z4.s - z7.s }
; CHECK-NEXT:    ret
  %res = call <vscale x 16 x i8> @llvm.aarch64.sve.sqcvtu.x4.nxv4i32(<vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4)
  ret <vscale x 16 x i8> %res
}

define <vscale x 8 x i16> @multi_vector_qcvt_x4_u16_s64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4) {
; CHECK-LABEL: multi_vector_qcvt_x4_u16_s64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    sqcvtu z0.h, { z4.d - z7.d }
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x i16> @llvm.aarch64.sve.sqcvtu.x4.nxv2i64(<vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4)
  ret <vscale x 8 x i16> %res
}
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqcvt.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqcvt.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqcvtu.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqcvt.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqcvt.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.uqcvt.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.uqcvt.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)
declare <vscale x 16 x i8> @llvm.aarch64.sve.sqcvtu.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>)
declare <vscale x 8 x i16> @llvm.aarch64.sve.sqcvtu.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>)