; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 4
; RUN: llc --mtriple=loongarch64 --code-model=medium --post-RA-scheduler=0 < %s \
; RUN:   | FileCheck %s --check-prefix=MEDIUM_NO_SCH
; RUN: llc --mtriple=loongarch64 --code-model=medium --post-RA-scheduler=1 < %s \
; RUN:   | FileCheck %s --check-prefix=MEDIUM_SCH
; RUN: llc --mtriple=loongarch64 --code-model=large --post-RA-scheduler=0 < %s \
; RUN:   | FileCheck %s --check-prefix=LARGE_NO_SCH
; RUN: llc --mtriple=loongarch64 --code-model=large --post-RA-scheduler=1 < %s \
; RUN:   | FileCheck %s --check-prefix=LARGE_SCH

;; FIXME: According to the psABI v2.30, the code sequences of the
;; `PseudoLA*_LARGE` pseudo instructions and of medium code model function
;; calls must be kept adjacent.

@g = dso_local global i64 zeroinitializer, align 4
@G = global i64 zeroinitializer, align 4
@gd = external thread_local global i64
@ld = external thread_local(localdynamic) global i64
@ie = external thread_local(initialexec) global i64

declare ptr @bar(i64)

define void @foo() nounwind {
; MEDIUM_NO_SCH-LABEL: foo:
; MEDIUM_NO_SCH:       # %bb.0:
; MEDIUM_NO_SCH-NEXT:    addi.d $sp, $sp, -16
; MEDIUM_NO_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %got_pc_lo12(G)
; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, 0
; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
; MEDIUM_NO_SCH-NEXT:    addi.d $a0, $a0, %pc_lo12(g)
; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, 0
; MEDIUM_NO_SCH-NEXT:    ori $a0, $zero, 1
; MEDIUM_NO_SCH-NEXT:    pcaddu18i $ra, %call36(bar)
; MEDIUM_NO_SCH-NEXT:    jirl $ra, $ra, 0
; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(gd)
; MEDIUM_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ld)
; MEDIUM_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
; MEDIUM_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
; MEDIUM_NO_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ie)
; MEDIUM_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
; MEDIUM_NO_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; MEDIUM_NO_SCH-NEXT:    addi.d $sp, $sp, 16
; MEDIUM_NO_SCH-NEXT:    ret
;
; MEDIUM_SCH-LABEL: foo:
; MEDIUM_SCH:       # %bb.0:
; MEDIUM_SCH-NEXT:    addi.d $sp, $sp, -16
; MEDIUM_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; MEDIUM_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %got_pc_lo12(G)
; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, 0
; MEDIUM_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
; MEDIUM_SCH-NEXT:    addi.d $a0, $a0, %pc_lo12(g)
; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, 0
; MEDIUM_SCH-NEXT:    ori $a0, $zero, 1
; MEDIUM_SCH-NEXT:    pcaddu18i $ra, %call36(bar)
; MEDIUM_SCH-NEXT:    jirl $ra, $ra, 0
; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(gd)
; MEDIUM_SCH-NEXT:    ldx.d $a0, $a0, $tp
; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ld)
; MEDIUM_SCH-NEXT:    ldx.d $a0, $a0, $tp
; MEDIUM_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
; MEDIUM_SCH-NEXT:    ld.d $a0, $a0, %ie_pc_lo12(ie)
; MEDIUM_SCH-NEXT:    ldx.d $a0, $a0, $tp
; MEDIUM_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; MEDIUM_SCH-NEXT:    addi.d $sp, $sp, 16
; MEDIUM_SCH-NEXT:    ret
;
; LARGE_NO_SCH-LABEL: foo:
; LARGE_NO_SCH:       # %bb.0:
; LARGE_NO_SCH-NEXT:    addi.d $sp, $sp, -16
; LARGE_NO_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %got_pc_lo12(G)
; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %got64_pc_lo20(G)
; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(G)
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_NO_SCH-NEXT:    ld.d $a0, $a0, 0
; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %pc_lo12(g)
; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %pc64_lo20(g)
; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %pc64_hi12(g)
; LARGE_NO_SCH-NEXT:    add.d $a0, $t8, $a0
; LARGE_NO_SCH-NEXT:    ld.d $a0, $a0, 0
; LARGE_NO_SCH-NEXT:    ori $a0, $zero, 1
; LARGE_NO_SCH-NEXT:    pcalau12i $ra, %got_pc_hi20(bar)
; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %got_pc_lo12(bar)
; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %got64_pc_lo20(bar)
; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(bar)
; LARGE_NO_SCH-NEXT:    ldx.d $ra, $t8, $ra
; LARGE_NO_SCH-NEXT:    jirl $ra, $ra, 0
; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(gd)
; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(gd)
; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(gd)
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ld)
; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ld)
; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ld)
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
; LARGE_NO_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
; LARGE_NO_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
; LARGE_NO_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
; LARGE_NO_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_NO_SCH-NEXT:    ldx.d $a0, $a0, $tp
; LARGE_NO_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LARGE_NO_SCH-NEXT:    addi.d $sp, $sp, 16
; LARGE_NO_SCH-NEXT:    ret
;
; LARGE_SCH-LABEL: foo:
; LARGE_SCH:       # %bb.0:
; LARGE_SCH-NEXT:    addi.d $sp, $sp, -16
; LARGE_SCH-NEXT:    st.d $ra, $sp, 8 # 8-byte Folded Spill
; LARGE_SCH-NEXT:    pcalau12i $a0, %got_pc_hi20(G)
; LARGE_SCH-NEXT:    addi.d $t8, $zero, %got_pc_lo12(G)
; LARGE_SCH-NEXT:    lu32i.d $t8, %got64_pc_lo20(G)
; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(G)
; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_SCH-NEXT:    ld.d $a0, $a0, 0
; LARGE_SCH-NEXT:    pcalau12i $a0, %pc_hi20(g)
; LARGE_SCH-NEXT:    addi.d $t8, $zero, %pc_lo12(g)
; LARGE_SCH-NEXT:    lu32i.d $t8, %pc64_lo20(g)
; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %pc64_hi12(g)
; LARGE_SCH-NEXT:    add.d $a0, $t8, $a0
; LARGE_SCH-NEXT:    ld.d $a0, $a0, 0
; LARGE_SCH-NEXT:    ori $a0, $zero, 1
; LARGE_SCH-NEXT:    pcalau12i $ra, %got_pc_hi20(bar)
; LARGE_SCH-NEXT:    addi.d $t8, $zero, %got_pc_lo12(bar)
; LARGE_SCH-NEXT:    lu32i.d $t8, %got64_pc_lo20(bar)
; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %got64_pc_hi12(bar)
; LARGE_SCH-NEXT:    ldx.d $ra, $t8, $ra
; LARGE_SCH-NEXT:    jirl $ra, $ra, 0
; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(gd)
; LARGE_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(gd)
; LARGE_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(gd)
; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(gd)
; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_SCH-NEXT:    ldx.d $a0, $a0, $tp
; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ld)
; LARGE_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ld)
; LARGE_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ld)
; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ld)
; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_SCH-NEXT:    ldx.d $a0, $a0, $tp
; LARGE_SCH-NEXT:    pcalau12i $a0, %ie_pc_hi20(ie)
; LARGE_SCH-NEXT:    addi.d $t8, $zero, %ie_pc_lo12(ie)
; LARGE_SCH-NEXT:    lu32i.d $t8, %ie64_pc_lo20(ie)
; LARGE_SCH-NEXT:    lu52i.d $t8, $t8, %ie64_pc_hi12(ie)
; LARGE_SCH-NEXT:    ldx.d $a0, $t8, $a0
; LARGE_SCH-NEXT:    ldx.d $a0, $a0, $tp
; LARGE_SCH-NEXT:    ld.d $ra, $sp, 8 # 8-byte Folded Reload
; LARGE_SCH-NEXT:    addi.d $sp, $sp, 16
; LARGE_SCH-NEXT:    ret
  %V = load volatile i64, ptr @G
  %v = load volatile i64, ptr @g
  call void @bar(i64 1)
  %v_gd = load volatile i64, ptr @gd
  %v_ld = load volatile i64, ptr @ld
  %v_ie = load volatile i64, ptr @ie
  ret void
}
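
;; Note (illustrative only, not checked by FileCheck): the adjacency constraint
;; referenced in the FIXME above means that, for example, the medium code model
;; call pair
;;   pcaddu18i $ra, %call36(bar)
;;   jirl      $ra, $ra, 0
;; and the multi-instruction expansion of a `PseudoLA*_LARGE` pseudo, e.g.
;;   pcalau12i $a0, %got_pc_hi20(sym)
;;   addi.d    $t8, $zero, %got_pc_lo12(sym)
;;   lu32i.d   $t8, %got64_pc_lo20(sym)
;;   lu52i.d   $t8, $t8, %got64_pc_hi12(sym)
;;   ldx.d     $a0, $t8, $a0
;; are expected to stay contiguous even when the post-RA scheduler is enabled
;; (the --post-RA-scheduler=1 RUN lines); `sym` here is a placeholder symbol.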