; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=LR,FP,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s
; RUN: llc < %s -mtriple=aarch64-unknown-linux-gnu -reserve-regs-for-regalloc=X30,X29,X28,X27,X26,X25,X24,X23,X22,X21,X20,X19,X18,X17,X16,X15,X14,X13,X12,X11,X10,X9,X8,X7,X6,X5,X4 | FileCheck %s

; LR, FP, X30 and X29 should be correctly recognized and not used.

; With every GPR except x0-x3 reserved, codegen must keep all values in
; x0-x3, spilling the live-across value %v3 to the stack instead of using
; a (reserved) callee-saved register. The two RUN lines spell the link
; register / frame pointer by alias (LR, FP) and by number (X30, X29);
; both must be accepted and produce identical code, hence one CHECK set.
define void @foo(i64 %v1, i64 %v2, ptr %ptr) {
; CHECK-LABEL: foo:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sub sp, sp, #16
; CHECK-NEXT:    .cfi_def_cfa_offset 16
; CHECK-NEXT:    add x3, x0, x1
; CHECK-NEXT:    str x3, [sp, #8] // 8-byte Folded Spill
; CHECK-NEXT:    str x3, [x2, #8]
; CHECK-NEXT:    ldr x3, [x2, #16]
; CHECK-NEXT:    add x3, x0, x3
; CHECK-NEXT:    sub x3, x3, x1
; CHECK-NEXT:    str x3, [x2, #16]
; CHECK-NEXT:    ldr x3, [sp, #8] // 8-byte Folded Reload
; CHECK-NEXT:    str x3, [x2, #24]
; CHECK-NEXT:    str x0, [x2, #32]
; CHECK-NEXT:    str x1, [x2, #40]
; CHECK-NEXT:    add sp, sp, #16
; CHECK-NEXT:    ret
  %v3 = add i64 %v1, %v2
  %p1 = getelementptr i64, ptr %ptr, i64 1
  store volatile i64 %v3, ptr %p1, align 8
  %p2 = getelementptr i64, ptr %ptr, i64 2
  %v4 = load volatile i64, ptr %p2, align 8
  %v5 = add i64 %v1, %v4
  %v6 = sub i64 %v5, %v2
  store volatile i64 %v6, ptr %p2, align 8
  %p3 = getelementptr i64, ptr %ptr, i64 3
  store volatile i64 %v3, ptr %p3, align 8
  %p4 = getelementptr i64, ptr %ptr, i64 4
  store volatile i64 %v1, ptr %p4, align 8
  %p5 = getelementptr i64, ptr %ptr, i64 5
  store volatile i64 %v2, ptr %p5, align 8
  ret void
}