// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN:   -aux-triple x86_64-unknown-linux-gnu -fcuda-is-device -emit-llvm %s \
// RUN:   -o - | FileCheck %s

// RUN: %clang_cc1 -triple amdgcn-amd-amdhsa -target-cpu gfx906 -x hip \
// RUN:   -aux-triple x86_64-pc-windows-msvc -fcuda-is-device -emit-llvm %s \
// RUN:   -o - | FileCheck %s

#include "Inputs/cuda.h"

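// __builtin_amdgcn_dispatch_ptr() returns the HSA dispatch packet pointer in
// the constant address space (4); the checks below verify the cast back to a
// generic pointer before the load.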
// CHECK-LABEL: @_Z16use_dispatch_ptrPi(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OUT:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[DISPATCH_PTR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr
// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
// CHECK-NEXT:    [[DISPATCH_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[DISPATCH_PTR]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr
// CHECK-NEXT:    store ptr [[TMP2]], ptr [[DISPATCH_PTR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[DISPATCH_PTR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NEXT:    ret void
//
__global__ void use_dispatch_ptr(int* out) {
  const int* dispatch_ptr = (const int*)__builtin_amdgcn_dispatch_ptr();
  *out = *dispatch_ptr;
}

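// __builtin_amdgcn_queue_ptr() returns a pointer to the HSA queue, also in
// address space 4 and likewise cast to a generic pointer before the load.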
// CHECK-LABEL: @_Z13use_queue_ptrPi(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OUT:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[QUEUE_PTR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr
// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
// CHECK-NEXT:    [[QUEUE_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[QUEUE_PTR]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(4) @llvm.amdgcn.queue.ptr()
// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr
// CHECK-NEXT:    store ptr [[TMP2]], ptr [[QUEUE_PTR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[QUEUE_PTR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NEXT:    ret void
//
__global__ void use_queue_ptr(int* out) {
  const int* queue_ptr = (const int*)__builtin_amdgcn_queue_ptr();
  *out = *queue_ptr;
}

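// __builtin_amdgcn_implicitarg_ptr() returns a pointer to the implicit kernel
// argument area, again in address space 4.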
// CHECK-LABEL: @_Z19use_implicitarg_ptrPi(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OUT:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[IMPLICITARG_PTR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr
// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
// CHECK-NEXT:    [[IMPLICITARG_PTR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[IMPLICITARG_PTR]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = call ptr addrspace(4) @llvm.amdgcn.implicitarg.ptr()
// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr addrspace(4) [[TMP1]] to ptr
// CHECK-NEXT:    store ptr [[TMP2]], ptr [[IMPLICITARG_PTR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[IMPLICITARG_PTR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP4:%.*]] = load i32, ptr [[TMP3]], align 4
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i32 [[TMP4]], ptr [[TMP5]], align 4
// CHECK-NEXT:    ret void
//
__global__ void use_implicitarg_ptr(int* out) {
  const int* implicitarg_ptr = (const int*)__builtin_amdgcn_implicitarg_ptr();
  *out = *implicitarg_ptr;
}

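// ds.fmax performs an atomic float max on an LDS location; a __shared__
// variable lowers directly to a ptr addrspace(3) intrinsic operand.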
// CHECK-LABEL: @_Z12test_ds_fmaxf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[X:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    store float [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fmax.f32(ptr addrspace(3) @_ZZ12test_ds_fmaxfE6shared, float [[TMP0]], i32 0, i32 0, i1 false)
// CHECK-NEXT:    store volatile float [[TMP1]], ptr [[X_ASCAST]], align 4
// CHECK-NEXT:    ret void
//
__global__ void test_ds_fmax(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_fmaxf(&shared, src, 0, 0, false);
}

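// Same pattern for the atomic float add on LDS.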
// CHECK-LABEL: @_Z12test_ds_faddf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[X:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    store float [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP0:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP1:%.*]] = call contract float @llvm.amdgcn.ds.fadd.f32(ptr addrspace(3) @_ZZ12test_ds_faddfE6shared, float [[TMP0]], i32 0, i32 0, i1 false)
// CHECK-NEXT:    store volatile float [[TMP1]], ptr [[X_ASCAST]], align 4
// CHECK-NEXT:    ret void
//
__global__ void test_ds_fadd(float src) {
  __shared__ float shared;
  volatile float x = __builtin_amdgcn_ds_faddf(&shared, src, 0, 0, false);
}

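// Here the LDS pointer arrives as a generic float*, so codegen must insert a
// generic-to-addrspace(3) cast before calling the intrinsic.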
// CHECK-LABEL: @_Z12test_ds_fminfPf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SHARED:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[SHARED_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[X:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[SHARED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SHARED]] to ptr
// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr
// CHECK-NEXT:    [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SHARED_ADDR]] to ptr
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[SHARED_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[SHARED_ASCAST]], align 8
// CHECK-NEXT:    [[SHARED1:%.*]] = load ptr, ptr [[SHARED_ASCAST]], align 8
// CHECK-NEXT:    store float [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    store ptr [[SHARED1]], ptr [[SHARED_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3)
// CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(ptr addrspace(3) [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false)
// CHECK-NEXT:    store volatile float [[TMP4]], ptr [[X_ASCAST]], align 4
// CHECK-NEXT:    ret void
//
__global__ void test_ds_fmin(float src, float *shared) {
  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
}

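// The addrspace(4) result of dispatch_ptr is cast to a generic pointer when
// assigned to a plain void*.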
// CHECK-LABEL: @_Z33test_ret_builtin_nondef_addrspacev(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = call align 4 dereferenceable(64) ptr addrspace(4) @llvm.amdgcn.dispatch.ptr()
// CHECK-NEXT:    [[TMP1:%.*]] = addrspacecast ptr addrspace(4) [[TMP0]] to ptr
// CHECK-NEXT:    store ptr [[TMP1]], ptr [[X_ASCAST]], align 8
// CHECK-NEXT:    ret void
//
__device__ void test_ret_builtin_nondef_addrspace() {
  void *x = __builtin_amdgcn_dispatch_ptr();
}

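// endpgm terminates the program; the builtin maps directly onto the
// llvm.amdgcn.endpgm intrinsic with no arguments.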
// CHECK-LABEL: @_Z6endpgmv(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    call void @llvm.amdgcn.endpgm()
// CHECK-NEXT:    ret void
//
__global__ void endpgm() {
  __builtin_amdgcn_endpgm();
}

// Check that the 64-bit arguments are correctly passed to the intrinsic without
// truncation or assertion.
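// The folded constant 30+5 == 35 selects an unsigned comparison predicate
// (ICMP_UGE in LLVM's CmpInst numbering), visible as the i32 35 operand below.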
// CHECK-LABEL: @_Z14test_uicmp_i64Pyyy(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OUT:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT:    [[B_ADDR:%.*]] = alloca i64, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr
// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
// CHECK-NEXT:    [[A_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[A_ADDR]] to ptr
// CHECK-NEXT:    [[B_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[B_ADDR]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i64 [[A:%.*]], ptr [[A_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i64 [[B:%.*]], ptr [[B_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[A_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = load i64, ptr [[B_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP3:%.*]] = call i64 @llvm.amdgcn.icmp.i64.i64(i64 [[TMP1]], i64 [[TMP2]], i32 35)
// CHECK-NEXT:    [[TMP4:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i64 [[TMP3]], ptr [[TMP4]], align 8
// CHECK-NEXT:    ret void
//
__global__ void test_uicmp_i64(unsigned long long *out, unsigned long long a, unsigned long long b)
{
  *out = __builtin_amdgcn_uicmpl(a, b, 30+5);
}

// Check that the 64-bit return value is correctly returned without truncation
// or assertion.
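// s_memtime reads a 64-bit hardware time counter.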
// CHECK-LABEL: @_Z14test_s_memtimePy(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[OUT:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[OUT_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT]] to ptr
// CHECK-NEXT:    [[OUT_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[OUT_ADDR]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[OUT_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    [[OUT1:%.*]] = load ptr, ptr [[OUT_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[OUT1]], ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = call i64 @llvm.amdgcn.s.memtime()
// CHECK-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[OUT_ADDR_ASCAST]], align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr [[TMP2]], align 8
// CHECK-NEXT:    ret void
//
__global__ void test_s_memtime(unsigned long long* out)
{
  *out = __builtin_amdgcn_s_memtime();
}

// Check that a generic pointer can be passed both as a shared pointer and as a
// generic pointer.
__device__ void func(float *x);

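// The CHECK lines below verify that the same __restrict pointer is cast to
// addrspace(3) for the intrinsic call and then passed to func() untouched.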
// CHECK-LABEL: @_Z17test_ds_fmin_funcfPf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[SHARED:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[SRC_ADDR:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[SHARED_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[X:%.*]] = alloca float, align 4, addrspace(5)
// CHECK-NEXT:    [[SHARED_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SHARED]] to ptr
// CHECK-NEXT:    [[SRC_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SRC_ADDR]] to ptr
// CHECK-NEXT:    [[SHARED_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[SHARED_ADDR]] to ptr
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[SHARED_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[SHARED_ASCAST]], align 8
// CHECK-NEXT:    [[SHARED1:%.*]] = load ptr, ptr [[SHARED_ASCAST]], align 8
// CHECK-NEXT:    store float [[SRC:%.*]], ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    store ptr [[SHARED1]], ptr [[SHARED_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = addrspacecast ptr [[TMP1]] to ptr addrspace(3)
// CHECK-NEXT:    [[TMP3:%.*]] = load float, ptr [[SRC_ADDR_ASCAST]], align 4
// CHECK-NEXT:    [[TMP4:%.*]] = call contract float @llvm.amdgcn.ds.fmin.f32(ptr addrspace(3) [[TMP2]], float [[TMP3]], i32 0, i32 0, i1 false)
// CHECK-NEXT:    store volatile float [[TMP4]], ptr [[X_ASCAST]], align 4
// CHECK-NEXT:    [[TMP5:%.*]] = load ptr, ptr [[SHARED_ADDR_ASCAST]], align 8
// CHECK-NEXT:    call void @_Z4funcPf(ptr noundef [[TMP5]]) #[[ATTR8:[0-9]+]]
// CHECK-NEXT:    ret void
//
__global__ void test_ds_fmin_func(float src, float *__restrict shared) {
  volatile float x = __builtin_amdgcn_ds_fminf(shared, src, 0, 0, false);
  func(shared);
}

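// is.shared tests whether a generic pointer addresses LDS; the i1 result is
// zero-extended to i8 to store the bool.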
// CHECK-LABEL: @_Z14test_is_sharedPf(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[RET:%.*]] = alloca i8, align 1, addrspace(5)
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
// CHECK-NEXT:    [[RET_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RET]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[X_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[X_ASCAST]], align 8
// CHECK-NEXT:    [[X1:%.*]] = load ptr, ptr [[X_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[X1]], ptr [[X_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[X_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.amdgcn.is.shared(ptr [[TMP1]])
// CHECK-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TMP2]] to i8
// CHECK-NEXT:    store i8 [[FROMBOOL]], ptr [[RET_ASCAST]], align 1
// CHECK-NEXT:    ret void
//
__global__ void test_is_shared(float *x) {
  bool ret = __builtin_amdgcn_is_shared(x);
}

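// is.private does the same check for private (scratch) memory.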
// CHECK-LABEL: @_Z15test_is_privatePi(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[X:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[X_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
// CHECK-NEXT:    [[RET:%.*]] = alloca i8, align 1, addrspace(5)
// CHECK-NEXT:    [[X_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X]] to ptr
// CHECK-NEXT:    [[X_ADDR_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[X_ADDR]] to ptr
// CHECK-NEXT:    [[RET_ASCAST:%.*]] = addrspacecast ptr addrspace(5) [[RET]] to ptr
// CHECK-NEXT:    [[TMP0:%.*]] = addrspacecast ptr addrspace(1) [[X_COERCE:%.*]] to ptr
// CHECK-NEXT:    store ptr [[TMP0]], ptr [[X_ASCAST]], align 8
// CHECK-NEXT:    [[X1:%.*]] = load ptr, ptr [[X_ASCAST]], align 8
// CHECK-NEXT:    store ptr [[X1]], ptr [[X_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load ptr, ptr [[X_ADDR_ASCAST]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = call i1 @llvm.amdgcn.is.private(ptr [[TMP1]])
// CHECK-NEXT:    [[FROMBOOL:%.*]] = zext i1 [[TMP2]] to i8
// CHECK-NEXT:    store i8 [[FROMBOOL]], ptr [[RET_ASCAST]], align 1
// CHECK-NEXT:    ret void
//
__global__ void test_is_private(int *x) {
  bool ret = __builtin_amdgcn_is_private(x);
}