; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s

; == 8 to 64-bit elements ==

define { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @sel_x4_i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2, <vscale x 16 x i8> %zm3, <vscale x 16 x i8> %zm4) nounwind {
; CHECK-LABEL: sel_x4_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.b
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1b { z27.b }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.b - z3.b }, pn8, { z28.b - z31.b }, { z24.b - z27.b }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.sel.x4.nxv16i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2, <vscale x 16 x i8> %zm3, <vscale x 16 x i8> %zm4)
  ret { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}

define { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @sel_x4_i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3, <vscale x 8 x i16> %zm4) nounwind {
; CHECK-LABEL: sel_x4_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.h
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1h { z27.h }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.h - z3.h }, pn8, { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sel.x4.nxv8i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3, <vscale x 8 x i16> %zm4)
  ret { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}

define { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @sel_x4_f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4) nounwind {
; CHECK-LABEL: sel_x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.h
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1h { z27.h }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.h - z3.h }, pn8, { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.sel.x4.nxv8f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4)
  ret { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } %res
}

define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @sel_x4_bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3, <vscale x 8 x bfloat> %zm4) nounwind {
; CHECK-LABEL: sel_x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.h
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1h { z27.h }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.h - z3.h }, pn8, { z28.h - z31.h }, { z24.h - z27.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.sel.x4.nxv8bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3, <vscale x 8 x bfloat> %zm4)
  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}

define { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @sel_x4_i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4) nounwind {
; CHECK-LABEL: sel_x4_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1w { z27.s }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.s - z3.s }, pn8, { z28.s - z31.s }, { z24.s - z27.s }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sel.x4.nxv4i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4)
  ret { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}

define { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @sel_x4_f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) nounwind {
; CHECK-LABEL: sel_x4_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.s
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1w { z27.s }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.s - z3.s }, pn8, { z28.s - z31.s }, { z24.s - z27.s }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.sel.x4.nxv4f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
  ret { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } %res
}

define { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @sel_x4_i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3, <vscale x 2 x i64> %zm4) nounwind {
; CHECK-LABEL: sel_x4_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1d { z27.d }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.d - z3.d }, pn8, { z28.d - z31.d }, { z24.d - z27.d }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sel.x4.nxv2i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3, <vscale x 2 x i64> %zm4)
  ret { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}

define { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @sel_x4_f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4) nounwind {
; CHECK-LABEL: sel_x4_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z31.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    ld1d { z27.d }, p1/z, [x0]
; CHECK-NEXT:    sel { z0.d - z3.d }, pn8, { z28.d - z31.d }, { z24.d - z27.d }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.sel.x4.nxv2f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
  ret { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } %res
}

; == 8 to 64-bit elements ==
declare { <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.sel.x4.nxv16i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, <vscale x 16 x i8> %zn4, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2, <vscale x 16 x i8> %zm3, <vscale x 16 x i8> %zm4)
declare { <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sel.x4.nxv8i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, <vscale x 8 x i16> %zn4, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2, <vscale x 8 x i16> %zm3, <vscale x 8 x i16> %zm4)
declare { <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sel.x4.nxv4i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, <vscale x 4 x i32> %zn4, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2, <vscale x 4 x i32> %zm3, <vscale x 4 x i32> %zm4)
declare { <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sel.x4.nxv2i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, <vscale x 2 x i64> %zn4, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2, <vscale x 2 x i64> %zm3, <vscale x 2 x i64> %zm4)
declare { <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.sel.x4.nxv8f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, <vscale x 8 x half> %zn4, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2, <vscale x 8 x half> %zm3, <vscale x 8 x half> %zm4)
declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.sel.x4.nxv8bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, <vscale x 8 x bfloat> %zn4, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2, <vscale x 8 x bfloat> %zm3, <vscale x 8 x bfloat> %zm4)
declare { <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.sel.x4.nxv4f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn4, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
declare { <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.sel.x4.nxv2f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zn4, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)