; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1,+bf16 < %s | FileCheck %s
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sme2,+bf16 < %s | FileCheck %s

; == Normal Multi-Vector Consecutive Stores ==
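; The st1.pn.x2/.x4 intrinsics exercised below lower to the SVE2.1/SME2
; multi-vector ST1B/ST1H/ST1W/ST1D instructions. The leading %unused
; argument keeps the data vectors out of z0, so the register allocator must
; first copy them into a consecutive tuple (z2/z3 for x2, z4-z7 for x4).
; The predicate-as-counter %pn arrives in p0 and is copied to pn8 because
; these instructions encode their governing predicate in the range
; pn8-pn15; p8 is callee-saved under the SVE calling convention, which
; accounts for the spill/reload and stack adjustment around each store.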
define void @st1_x2_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1b { z2.b, z3.b }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv16i8(<vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1h { z2.h, z3.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv8i16(<vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1w { z2.s, z3.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1d { z2.d, z3.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv2i64(<vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1h { z2.h, z3.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv8f16(<vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1h { z2.h, z3.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv8bf16(<vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1w { z2.s, z3.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x2_f64(<vscale x 2 x double> %unused, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x2_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    st1d { z2.d, z3.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x2.nxv2f64(<vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}
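; The x4 variants follow the same pattern with four data vectors: four mov
; copies assemble the consecutive register tuple z4-z7 named by the
; { z4 - z7 } register list of the store.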
define void @st1_x4_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1b { z4.b - z7.b }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv16i8(<vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x4_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1h { z4.h - z7.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv8i16(<vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x4_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1w { z4.s - z7.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x4_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1d { z4.d - z7.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv2i64(<vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}
define void @st1_x4_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1h { z4.h - z7.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv8f16(<vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x4_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1h { z4.h - z7.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv8bf16(<vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1w { z4.s - z7.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @st1_x4_f64(<vscale x 2 x double> %unused, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: st1_x4_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    st1d { z4.d - z7.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.st1.pn.x4.nxv2f64(<vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

; == Non-temporal Multi-Vector Consecutive Stores ==
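; The stnt1.pn tests mirror the st1.pn tests above but select the
; non-temporal STNT1B/STNT1H/STNT1W/STNT1D encodings, which hint to the
; memory system that the stored data need not be retained in cache.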
define void @stnt1_x2_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1b { z2.b, z3.b }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv16i8(<vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x2_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1h { z2.h, z3.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv8i16(<vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x2_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1w { z2.s, z3.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x2_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1d { z2.d, z3.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv2i64(<vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}
define void @stnt1_x2_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1h { z2.h, z3.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv8f16(<vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x2_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1h { z2.h, z3.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv8bf16(<vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x2_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1w { z2.s, z3.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x2_f64(<vscale x 2 x double> %unused, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x2_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z3.d, z2.d
; CHECK-NEXT:    mov z2.d, z1.d
; CHECK-NEXT:    stnt1d { z2.d, z3.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x2.nxv2f64(<vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}
define void @stnt1_x4_i8(<vscale x 16 x i8> %unused, <vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_i8:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1b { z4.b - z7.b }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv16i8(<vscale x 16 x i8> %zn0, <vscale x 16 x i8> %zn1, <vscale x 16 x i8> %zn2, <vscale x 16 x i8> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x4_i16(<vscale x 8 x i16> %unused, <vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_i16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1h { z4.h - z7.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv8i16(<vscale x 8 x i16> %zn0, <vscale x 8 x i16> %zn1, <vscale x 8 x i16> %zn2, <vscale x 8 x i16> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x4_i32(<vscale x 4 x i32> %unused, <vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1w { z4.s - z7.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv4i32(<vscale x 4 x i32> %zn0, <vscale x 4 x i32> %zn1, <vscale x 4 x i32> %zn2, <vscale x 4 x i32> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x4_i64(<vscale x 2 x i64> %unused, <vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1d { z4.d - z7.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv2i64(<vscale x 2 x i64> %zn0, <vscale x 2 x i64> %zn1, <vscale x 2 x i64> %zn2, <vscale x 2 x i64> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}
define void @stnt1_x4_f16(<vscale x 8 x half> %unused, <vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_f16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1h { z4.h - z7.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv8f16(<vscale x 8 x half> %zn0, <vscale x 8 x half> %zn1, <vscale x 8 x half> %zn2, <vscale x 8 x half> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x4_bf16(<vscale x 8 x bfloat> %unused, <vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_bf16:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1h { z4.h - z7.h }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv8bf16(<vscale x 8 x bfloat> %zn0, <vscale x 8 x bfloat> %zn1, <vscale x 8 x bfloat> %zn2, <vscale x 8 x bfloat> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

define void @stnt1_x4_f32(<vscale x 4 x float> %unused, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1w { z4.s - z7.s }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv4f32(<vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}
define void @stnt1_x4_f64(<vscale x 2 x double> %unused, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, target("aarch64.svcount") %pn, ptr %ptr) nounwind {
; CHECK-LABEL: stnt1_x4_f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    str x29, [sp, #-16]! // 8-byte Folded Spill
; CHECK-NEXT:    addvl sp, sp, #-1
; CHECK-NEXT:    str p8, [sp, #7, mul vl] // 2-byte Folded Spill
; CHECK-NEXT:    mov z7.d, z4.d
; CHECK-NEXT:    mov p8.b, p0.b
; CHECK-NEXT:    mov z6.d, z3.d
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    stnt1d { z4.d - z7.d }, pn8, [x0]
; CHECK-NEXT:    ldr p8, [sp, #7, mul vl] // 2-byte Folded Reload
; CHECK-NEXT:    addvl sp, sp, #1
; CHECK-NEXT:    ldr x29, [sp], #16 // 8-byte Folded Reload
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sve.stnt1.pn.x4.nxv2f64(<vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, target("aarch64.svcount") %pn, ptr %ptr);
  ret void
}

declare void @llvm.aarch64.sve.st1.pn.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, target("aarch64.svcount"), ptr)

declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x2.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, target("aarch64.svcount"), ptr)

declare void @llvm.aarch64.sve.st1.pn.x4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.st1.pn.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, target("aarch64.svcount"), ptr)

declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i8>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, <vscale x 8 x i16>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i64>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv8f16(<vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, <vscale x 8 x half>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, target("aarch64.svcount"), ptr)
declare void @llvm.aarch64.sve.stnt1.pn.x4.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, target("aarch64.svcount"), ptr)
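; Each intrinsic's nxvNN suffix gives the scalable element type of the data
; vector operands, the x2/x4 infix gives the number of vectors stored, and
; every variant takes the predicate-as-counter and destination pointer as
; its final two operands.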