bolt/deps/llvm-18.1.8/llvm/test/CodeGen/AMDGPU/amdgpu-late-codegenprepare.ll

; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX9
; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1200 -amdgpu-late-codegenprepare %s | FileCheck %s -check-prefix=GFX12

; Make sure we don't crash when trying to create a bitcast between
; address spaces
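; On gfx900 the pass widens the under-aligned i8 load from the constant
; address space into a dword-aligned i32 load followed by lshr/trunc; on
; gfx1200 the original i8 load is expected to be left untouched.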
define amdgpu_kernel void @constant_from_offset_cast_generic_null() {
; GFX9-LABEL: @constant_from_offset_cast_generic_null(
; GFX9-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT: ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_null(
; GFX12-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT: ret void
;
%load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr null to ptr addrspace(4)), i64 6), align 1
store i8 %load, ptr addrspace(1) undef
ret void
}

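; Same as above, but the addrspacecast source is a null pointer in the
; global address space (1) rather than a generic pointer.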
define amdgpu_kernel void @constant_from_offset_cast_global_null() {
; GFX9-LABEL: @constant_from_offset_cast_global_null(
; GFX9-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT: ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_null(
; GFX12-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT: ret void
;
%load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) null to ptr addrspace(4)), i64 6), align 1
store i8 %load, ptr addrspace(1) undef
ret void
}

@gv = unnamed_addr addrspace(1) global [64 x i8] undef, align 4
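; Offset load through an addrspacecast of a global variable defined in
; address space 1.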
define amdgpu_kernel void @constant_from_offset_cast_global_gv() {
; GFX9-LABEL: @constant_from_offset_cast_global_gv(
; GFX9-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT: ret void
;
; GFX12-LABEL: @constant_from_offset_cast_global_gv(
; GFX12-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT: ret void
;
%load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr addrspace(1) @gv to ptr addrspace(4)), i64 6), align 1
store i8 %load, ptr addrspace(1) undef
ret void
}

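; Offset load through an addrspacecast of a generic (flat) pointer produced
; by inttoptr.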
define amdgpu_kernel void @constant_from_offset_cast_generic_inttoptr() {
; GFX9-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX9-NEXT: [[TMP1:%.*]] = load i32, ptr addrspace(4) getelementptr (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 4), align 4
; GFX9-NEXT: [[TMP2:%.*]] = lshr i32 [[TMP1]], 16
; GFX9-NEXT: [[TMP3:%.*]] = trunc i32 [[TMP2]] to i8
; GFX9-NEXT: store i8 [[TMP3]], ptr addrspace(1) undef, align 1
; GFX9-NEXT: ret void
;
; GFX12-LABEL: @constant_from_offset_cast_generic_inttoptr(
; GFX12-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
; GFX12-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT: ret void
;
%load = load i8, ptr addrspace(4) getelementptr inbounds (i8, ptr addrspace(4) addrspacecast (ptr inttoptr (i64 128 to ptr) to ptr addrspace(4)), i64 6), align 1
store i8 %load, ptr addrspace(1) undef
ret void
}

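; Plain inttoptr constant already in the constant address space: the i8 load
; is not widened; on gfx900 only its alignment is raised to 4, while gfx1200
; keeps the original align 1 load.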
define amdgpu_kernel void @constant_from_inttoptr() {
; GFX9-LABEL: @constant_from_inttoptr(
; GFX9-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 4
; GFX9-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX9-NEXT: ret void
;
; GFX12-LABEL: @constant_from_inttoptr(
; GFX12-NEXT: [[LOAD:%.*]] = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
; GFX12-NEXT: store i8 [[LOAD]], ptr addrspace(1) undef, align 1
; GFX12-NEXT: ret void
;
%load = load i8, ptr addrspace(4) inttoptr (i64 128 to ptr addrspace(4)), align 1
store i8 %load, ptr addrspace(1) undef
ret void
}