; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2 -verify-machineinstrs < %s | FileCheck %s

; == 8 to 64-bit elements ==

define { <vscale x 16 x i8>, <vscale x 16 x i8> } @sel_x2_i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.b, z1.b }, pn8, { z6.b, z7.b }, { z4.b, z5.b }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.sel.x2.nxv16i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2)
  ret { <vscale x 16 x i8>, <vscale x 16 x i8> } %res
}

define { <vscale x 8 x i16>, <vscale x 8 x i16> } @sel_x2_i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.h, z1.h }, pn8, { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sel.x2.nxv8i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2)
  ret { <vscale x 8 x i16>, <vscale x 8 x i16> } %res
}

define { <vscale x 8 x half>, <vscale x 8 x half> } @sel_x2_f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %unused, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2) nounwind {
; CHECK-LABEL: sel_x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.h, z1.h }, pn8, { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.sel.x2.nxv8f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2)
  ret { <vscale x 8 x half>, <vscale x 8 x half> } %res
}

define { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @sel_x2_bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2) nounwind {
; CHECK-LABEL: sel_x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.h, z1.h }, pn8, { z6.h, z7.h }, { z4.h, z5.h }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.sel.x2.nxv8bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2)
  ret { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } %res
}

define { <vscale x 4 x i32>, <vscale x 4 x i32> } @sel_x2_i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.s, z1.s }, pn8, { z6.s, z7.s }, { z4.s, z5.s }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sel.x2.nxv4i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
  ret { <vscale x 4 x i32>, <vscale x 4 x i32> } %res
}

define { <vscale x 4 x float>, <vscale x 4 x float> } @sel_x2_f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %unused, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) nounwind {
; CHECK-LABEL: sel_x2_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.s, z1.s }, pn8, { z6.s, z7.s }, { z4.s, z5.s }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.sel.x2.nxv4f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
  ret { <vscale x 4 x float>, <vscale x 4 x float> } %res
}

define { <vscale x 2 x i64>, <vscale x 2 x i64> } @sel_x2_i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2) nounwind {
; CHECK-LABEL: sel_x2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.d, z1.d }, pn8, { z6.d, z7.d }, { z4.d, z5.d }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sel.x2.nxv2i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
  ret { <vscale x 2 x i64>, <vscale x 2 x i64> } %res
}

define { <vscale x 2 x double>, <vscale x 2 x double> } @sel_x2_f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %unused, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2) nounwind {
; CHECK-LABEL: sel_x2_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z5.d, z4.d
; CHECK-NEXT:    mov z7.d, z2.d
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    sel { z0.d, z1.d }, pn8, { z6.d, z7.d }, { z4.d, z5.d }
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  %res = call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.sel.x2.nxv2f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
  ret { <vscale x 2 x double>, <vscale x 2 x double> } %res
}

; == 8 to 64-bit elements ==
declare { <vscale x 16 x i8>, <vscale x 16 x i8> } @llvm.aarch64.sve.sel.x2.nxv16i8(target("aarch64.svcount") %pn, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zm1, <vscale x 16 x i8> %zm2)
declare { <vscale x 8 x i16>, <vscale x 8 x i16> } @llvm.aarch64.sve.sel.x2.nxv8i16(target("aarch64.svcount") %pn, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zm1, <vscale x 8 x i16> %zm2)
declare { <vscale x 4 x i32>, <vscale x 4 x i32> } @llvm.aarch64.sve.sel.x2.nxv4i32(target("aarch64.svcount") %pn, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zm1, <vscale x 4 x i32> %zm2)
declare { <vscale x 2 x i64>, <vscale x 2 x i64> } @llvm.aarch64.sve.sel.x2.nxv2i64(target("aarch64.svcount") %pn, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zm1, <vscale x 2 x i64> %zm2)
declare { <vscale x 8 x half>, <vscale x 8 x half> } @llvm.aarch64.sve.sel.x2.nxv8f16(target("aarch64.svcount") %pn, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zm1, <vscale x 8 x half> %zm2)
declare { <vscale x 8 x bfloat>, <vscale x 8 x bfloat> } @llvm.aarch64.sve.sel.x2.nxv8bf16(target("aarch64.svcount") %pn, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zm1, <vscale x 8 x bfloat> %zm2)
declare { <vscale x 4 x float>, <vscale x 4 x float> } @llvm.aarch64.sve.sel.x2.nxv4f32(target("aarch64.svcount") %pn, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.aarch64.sve.sel.x2.nxv2f64(target("aarch64.svcount") %pn, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)