; bolt/deps/llvm-18.1.8/llvm/test/CodeGen/AMDGPU/GlobalISel/llvm.memmove.ll
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-- -verify-machineinstrs -mem-intrinsic-expand-size=3 %s -o - | FileCheck -check-prefix=LOOP %s
; RUN: llc -global-isel -mtriple=amdgcn-- -verify-machineinstrs -mem-intrinsic-expand-size=5 %s -o - | FileCheck -check-prefix=UNROLL %s

declare void @llvm.memmove.p1.p1.i32(ptr addrspace(1), ptr addrspace(1), i32, i1)

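; A 4-byte memmove between two global pointers with unknown aliasing. With
; -mem-intrinsic-expand-size=3 the copy is above the expansion threshold and is
; lowered to the forward/backward byte-copy loops checked under LOOP; with
; -mem-intrinsic-expand-size=5 it fits under the threshold and is unrolled into
; four byte loads and stores checked under UNROLL.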
define amdgpu_cs void @memmove_p1i8(ptr addrspace(1) %dst, ptr addrspace(1) %src) {
; LOOP-LABEL: memmove_p1i8:
; LOOP: ; %bb.0:
; LOOP-NEXT: v_cmp_ge_u64_e32 vcc, v[2:3], v[0:1]
; LOOP-NEXT: s_and_saveexec_b64 s[0:1], vcc
; LOOP-NEXT: s_xor_b64 s[4:5], exec, s[0:1]
; LOOP-NEXT: s_cbranch_execz .LBB0_3
; LOOP-NEXT: ; %bb.1: ; %copy_forward
; LOOP-NEXT: s_mov_b64 s[6:7], 0
; LOOP-NEXT: s_mov_b32 s2, 0
; LOOP-NEXT: s_mov_b32 s3, 0xf000
; LOOP-NEXT: s_mov_b64 s[0:1], 0
; LOOP-NEXT: v_mov_b32_e32 v4, s6
; LOOP-NEXT: v_mov_b32_e32 v5, s7
; LOOP-NEXT: .LBB0_2: ; %copy_forward_loop
; LOOP-NEXT: ; =>This Inner Loop Header: Depth=1
; LOOP-NEXT: v_add_i32_e32 v6, vcc, v2, v4
; LOOP-NEXT: v_addc_u32_e32 v7, vcc, v3, v5, vcc
; LOOP-NEXT: s_waitcnt expcnt(0)
; LOOP-NEXT: buffer_load_ubyte v8, v[6:7], s[0:3], 0 addr64
; LOOP-NEXT: v_add_i32_e32 v6, vcc, v0, v4
; LOOP-NEXT: v_addc_u32_e32 v7, vcc, v1, v5, vcc
; LOOP-NEXT: v_add_i32_e32 v4, vcc, 1, v4
; LOOP-NEXT: v_addc_u32_e32 v5, vcc, 0, v5, vcc
; LOOP-NEXT: v_cmp_ne_u32_e32 vcc, 4, v4
; LOOP-NEXT: s_waitcnt vmcnt(0)
; LOOP-NEXT: buffer_store_byte v8, v[6:7], s[0:3], 0 addr64
; LOOP-NEXT: s_cbranch_vccnz .LBB0_2
; LOOP-NEXT: .LBB0_3: ; %Flow17
; LOOP-NEXT: s_andn2_saveexec_b64 s[0:1], s[4:5]
; LOOP-NEXT: s_cbranch_execz .LBB0_6
; LOOP-NEXT: ; %bb.4: ; %copy_backwards
; LOOP-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; LOOP-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; LOOP-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; LOOP-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; LOOP-NEXT: s_mov_b32 s0, -4
; LOOP-NEXT: s_mov_b32 s6, 0
; LOOP-NEXT: s_mov_b32 s7, 0xf000
; LOOP-NEXT: s_mov_b64 s[4:5], 0
; LOOP-NEXT: v_mov_b32_e32 v4, s0
; LOOP-NEXT: .LBB0_5: ; %copy_backwards_loop
; LOOP-NEXT: ; =>This Inner Loop Header: Depth=1
; LOOP-NEXT: s_waitcnt expcnt(0)
; LOOP-NEXT: buffer_load_ubyte v5, v[2:3], s[4:7], 0 addr64
; LOOP-NEXT: v_add_i32_e32 v4, vcc, 1, v4
; LOOP-NEXT: s_and_b64 vcc, vcc, exec
; LOOP-NEXT: s_waitcnt vmcnt(0)
; LOOP-NEXT: buffer_store_byte v5, v[0:1], s[4:7], 0 addr64
; LOOP-NEXT: v_add_i32_e64 v0, s[0:1], -1, v0
; LOOP-NEXT: v_addc_u32_e64 v1, s[0:1], -1, v1, s[0:1]
; LOOP-NEXT: v_add_i32_e64 v2, s[0:1], -1, v2
; LOOP-NEXT: v_addc_u32_e64 v3, s[0:1], -1, v3, s[0:1]
; LOOP-NEXT: s_cbranch_vccz .LBB0_5
; LOOP-NEXT: .LBB0_6: ; %memmove_done
; LOOP-NEXT: s_endpgm
;
; UNROLL-LABEL: memmove_p1i8:
; UNROLL: ; %bb.0:
; UNROLL-NEXT: s_mov_b32 s2, 0
; UNROLL-NEXT: s_mov_b32 s3, 0xf000
; UNROLL-NEXT: s_mov_b64 s[0:1], 0
; UNROLL-NEXT: buffer_load_ubyte v4, v[2:3], s[0:3], 0 addr64
; UNROLL-NEXT: buffer_load_ubyte v5, v[2:3], s[0:3], 0 addr64 offset:1
; UNROLL-NEXT: buffer_load_ubyte v6, v[2:3], s[0:3], 0 addr64 offset:2
; UNROLL-NEXT: buffer_load_ubyte v2, v[2:3], s[0:3], 0 addr64 offset:3
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v4, v[0:1], s[0:3], 0 addr64
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v5, v[0:1], s[0:3], 0 addr64 offset:1
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v6, v[0:1], s[0:3], 0 addr64 offset:2
; UNROLL-NEXT: s_waitcnt vmcnt(3)
; UNROLL-NEXT: buffer_store_byte v2, v[0:1], s[0:3], 0 addr64 offset:3
; UNROLL-NEXT: s_endpgm
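  ; The LOOP expansion compares %src and %dst at runtime: it copies forward when
  ; %src >= %dst and otherwise copies backwards from the last byte, so overlapping
  ; regions are handled correctly in either direction.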
  call void @llvm.memmove.p1.p1.i32(ptr addrspace(1) %dst, ptr addrspace(1) %src, i32 4, i1 false)
  ret void
}