; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx1200 -mattr=-wavefrontsize32,+wavefrontsize64 -verify-machineinstrs < %s | FileCheck %s --check-prefix=GFX12
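; Check that fneg/fabs on the A, B, and C operands of the WMMA and SWMMAC
; intrinsics are folded into the neg_lo/neg_hi source modifiers of the
; generated instructions (wave64 variants).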
define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negA(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negA:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[1,0,0] neg_hi:[1,0,0]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.A = fneg <4 x half> %A
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %fneg.A, <4 x half> %B, <4 x float> %C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negB(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negB:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,1,0] neg_hi:[0,1,0]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.B = fneg <4 x half> %B
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %fneg.B, <4 x float> %C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x float> %C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fneg.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_f16_absC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_negC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x float> %C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fneg.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_bf16_absC(<4 x i16> %A, <4 x i16> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_bf16_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_bf16 v[4:7], v[0:1], v[2:3], v[4:7] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16> %A, <4 x i16> %B, <4 x float> %fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negA(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negA:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[1,0,0] neg_hi:[1,0,0]
; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.A = fneg <4 x half> %A
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %fneg.A, <4 x half> %B, <4 x half> %C, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negB(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negB:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,1,0] neg_hi:[0,1,0]
; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.B = fneg <4 x half> %B
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %fneg.B, <4 x half> %C, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x half> %C
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fneg.C, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_absC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x half> @llvm.fabs.v4f16(<4 x half> %C)
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fabs.C, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_fp8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_fp8_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_fp8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x float> %C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_fp8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_fp8_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_fp8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_fp8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_fp8_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_fp8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x float> %C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_fp8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_fp8_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_fp8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_bf8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_bf8_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_bf8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x float> %C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_fp8_bf8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_fp8_bf8_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_fp8_bf8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_bf8_negC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_bf8_negC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_bf8 v[2:5], v0, v1, v[2:5] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.C = fneg <4 x float> %C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fneg.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_bf8_bf8_absC(i32 %A, i32 %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_bf8_bf8_absC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_bf8_bf8 v[2:5], v0, v1, v[2:5] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[6:7], v[2:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.i32.v4f32(i32 %A, i32 %B, <4 x float> %fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_swmmac_f32_16x16x32_f16_negA(<4 x half> %A, <8 x half> %B, <4 x float> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_f32_16x16x32_f16_negA:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_swmmac_f32_16x16x32_f16 v[6:9], v[0:1], v[2:5], v10 neg_lo:[1,0,0] neg_hi:[1,0,0]
; GFX12-NEXT: global_store_b128 v[11:12], v[6:9], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.A = fneg <4 x half> %A
  %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f16.v8f16.v4f32.i16(<4 x half> %fneg.A, <8 x half> %B, <4 x float> %C, i16 %Index)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_swmmac_f32_16x16x32_f16_negB(<4 x half> %A, <8 x half> %B, <4 x float> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_f32_16x16x32_f16_negB:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_swmmac_f32_16x16x32_f16 v[6:9], v[0:1], v[2:5], v10 neg_lo:[0,1,0] neg_hi:[0,1,0]
; GFX12-NEXT: global_store_b128 v[11:12], v[6:9], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.B = fneg <8 x half> %B
  %res = call <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f16.v8f16.v4f32.i16(<4 x half> %A, <8 x half> %fneg.B, <4 x float> %C, i16 %Index)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_swmmac_f16_16x16x32_f16_negA(<4 x half> %A, <8 x half> %B, <4 x half> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_f16_16x16x32_f16_negA:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_swmmac_f16_16x16x32_f16 v[6:7], v[0:1], v[2:5], v8 neg_lo:[1,0,0] neg_hi:[1,0,0]
; GFX12-NEXT: global_store_b64 v[9:10], v[6:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.A = fneg <4 x half> %A
  %res = call <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v8f16.v4f16.i16(<4 x half> %fneg.A, <8 x half> %B, <4 x half> %C, i16 %Index)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_swmmac_f16_16x16x32_f16_negB(<4 x half> %A, <8 x half> %B, <4 x half> %C, i16 %Index, ptr addrspace(1) %out) {
; GFX12-LABEL: test_swmmac_f16_16x16x32_f16_negB:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_swmmac_f16_16x16x32_f16 v[6:7], v[0:1], v[2:5], v8 neg_lo:[0,1,0] neg_hi:[0,1,0]
; GFX12-NEXT: global_store_b64 v[9:10], v[6:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.B = fneg <8 x half> %B
  %res = call <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v8f16.v4f16.i16(<4 x half> %A, <8 x half> %fneg.B, <4 x half> %C, i16 %Index)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

; both neg and abs patterns (wmma matrix C f32 or f16)

define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negabsC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negabsC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x float> @llvm.fabs.v4f32(<4 x float> %C)
  %fneg.fabs.C = fneg <4 x float> %fabs.C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fneg.fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negabsC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negabsC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,0,1] neg_hi:[0,0,1]
; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fabs.C = call <4 x half> @llvm.fabs.v4f16(<4 x half> %C)
  %fneg.fabs.C = fneg <4 x half> %fabs.C
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fneg.fabs.C, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f32_16x16x16_f16_neg_partial_fabsA(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_neg_partial_fabsA:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_and_b32_e32 v7, 0x7fffffff, v7
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[4:7], v[0:1], v[2:3], v[4:7] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b128 v[8:9], v[4:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %el3 = extractelement <4 x float> %C, i32 3
  %el3.fabs = call float @llvm.fabs.f32(float %el3)
  %partial.fabs.C = insertelement <4 x float> %C, float %el3.fabs, i32 3
  %fneg.partial.fabs.C = fneg <4 x float> %partial.fabs.C
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %A, <4 x half> %B, <4 x float> %fneg.partial.fabs.C)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

; A or B matrix modifier and constant in C

define amdgpu_ps void @test_wmma_f32_16x16x16_f16_negA_constantC(<4 x half> %A, <4 x half> %B, <4 x float> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f32_16x16x16_f16_negA_constantC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f32_16x16x16_f16 v[6:9], v[0:1], v[2:3], 1.0 neg_lo:[1,0,0] neg_hi:[1,0,0]
; GFX12-NEXT: global_store_b128 v[4:5], v[6:9], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.A = fneg <4 x half> %A
  %res = call <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half> %fneg.A, <4 x half> %B, <4 x float> <float 1.0, float 1.0, float 1.0, float 1.0>)
  store <4 x float> %res, ptr addrspace(1) %out
  ret void
}

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negB_constantC(<4 x half> %A, <4 x half> %B, <4 x half> %C, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negB_constantC:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[6:7], v[0:1], v[2:3], 1.0 neg_lo:[0,1,0] neg_hi:[0,1,0]
; GFX12-NEXT: global_store_b64 v[4:5], v[6:7], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %fneg.B = fneg <4 x half> %B
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %fneg.B, <4 x half> <half 1.0, half 1.0, half 1.0, half 1.0>, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

; pack f16 elements with v_perm_b32 since they don't come from same b32

define amdgpu_ps void @test_wmma_f16_16x16x16_f16_negC_pack(<4 x half> %A, <4 x half> %B, ptr %Caddr, ptr addrspace(1) %out) {
; GFX12-LABEL: test_wmma_f16_16x16x16_f16_negC_pack:
; GFX12: ; %bb.0: ; %bb
; GFX12-NEXT: flat_load_b128 v[8:11], v[4:5]
; GFX12-NEXT: s_wait_loadcnt_dscnt 0x0
; GFX12-NEXT: v_perm_b32 v5, v11, v10, 0x5040100
; GFX12-NEXT: v_perm_b32 v4, v9, v8, 0x5040100
; GFX12-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX12-NEXT: v_wmma_f16_16x16x16_f16 v[4:5], v[0:1], v[2:3], v[4:5] neg_lo:[0,0,1]
; GFX12-NEXT: global_store_b64 v[6:7], v[4:5], off
; GFX12-NEXT: s_nop 0
; GFX12-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX12-NEXT: s_endpgm
bb:
  %C = load <8 x half>, ptr %Caddr
  %C_shuffle = shufflevector <8 x half> %C, <8 x half> undef, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %fneg.C_shuffle = fneg <4 x half> %C_shuffle
  %res = call <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half> %A, <4 x half> %B, <4 x half> %fneg.C_shuffle, i1 0)
  store <4 x half> %res, ptr addrspace(1) %out
  ret void
}

declare <4 x half> @llvm.fabs.v4f16(<4 x half>)
declare <4 x float> @llvm.fabs.v4f32(<4 x float>)
declare float @llvm.fabs.f32(float)

declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.f16.v4f16.v4f32(<4 x half>, <4 x half>, <4 x float>)
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf16.v4i16.v4f32(<4 x i16>, <4 x i16>, <4 x float>)
declare <4 x half> @llvm.amdgcn.wmma.f16.16x16x16.f16.v8f16.v8f16(<4 x half>, <4 x half>, <4 x half>, i1 immarg)
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.fp8.i32.v4f32(i32, i32, <4 x float>)
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.fp8.bf8.i32.v4f32(i32, i32, <4 x float>)
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.fp8.i32.v4f32(i32, i32, <4 x float>)
declare <4 x float> @llvm.amdgcn.wmma.f32.16x16x16.bf8.bf8.i32.v4f32(i32, i32, <4 x float>)
declare <4 x float> @llvm.amdgcn.swmmac.f32.16x16x32.f16.v4f16.v8f16.v4f32.i16(<4 x half>, <8 x half>, <4 x float>, i16)
declare <4 x half> @llvm.amdgcn.swmmac.f16.16x16x32.f16.v4f16.v8f16.v4f16.i16(<4 x half>, <8 x half>, <4 x half>, i16)