; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --check-globals
; RUN: opt --mtriple=amdgcn-amd-amdhsa --data-layout=A5 -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=AMDGPU
; RUN: opt --mtriple=nvptx64-- -S -passes=openmp-opt < %s | FileCheck %s --check-prefixes=NVPTX

%struct.ident_t = type { i32, i32, i32, i32, ptr }
%struct.ConfigurationEnvironmentTy = type { i8, i8, i8, i32, i32, i32, i32, i32, i32 }
%struct.KernelEnvironmentTy = type { %struct.ConfigurationEnvironmentTy, ptr, ptr }

@0 = private unnamed_addr constant [23 x i8] c";unknown;unknown;0;0;;\00", align 1
@1 = private unnamed_addr constant %struct.ident_t { i32 0, i32 2, i32 0, i32 0, ptr @0 }, align 8
@spmd_callees_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
@spmd_callees_metadata_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
@spmd_and_non_spmd_callees_metadata_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }
@spmd_and_non_spmd_callee_kernel_environment = local_unnamed_addr constant %struct.KernelEnvironmentTy { %struct.ConfigurationEnvironmentTy { i8 1, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @1, ptr null }

;.
; AMDGPU: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
; AMDGPU: @[[GLOB1:[0-9]+]] = private unnamed_addr constant [[STRUCT_IDENT_T:%.*]] { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
; AMDGPU: @[[SPMD_CALLEES_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; AMDGPU: @[[SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; AMDGPU: @[[SPMD_AND_NON_SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; AMDGPU: @[[SPMD_AND_NON_SPMD_CALLEE_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
;.
; NVPTX: @[[GLOB0:[0-9]+]] = private unnamed_addr constant [23 x i8] c"
; NVPTX: @[[GLOB1:[0-9]+]] = private unnamed_addr constant [[STRUCT_IDENT_T:%.*]] { i32 0, i32 2, i32 0, i32 0, ptr @[[GLOB0]] }, align 8
; NVPTX: @[[SPMD_CALLEES_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 1, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; NVPTX: @[[SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 3, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; NVPTX: @[[SPMD_AND_NON_SPMD_CALLEES_METADATA_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
; NVPTX: @[[SPMD_AND_NON_SPMD_CALLEE_KERNEL_ENVIRONMENT:[a-zA-Z0-9_$"\\.-]+]] = local_unnamed_addr constant [[STRUCT_KERNELENVIRONMENTTY:%.*]] { [[STRUCT_CONFIGURATIONENVIRONMENTTY:%.*]] { i8 0, i8 0, i8 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0 }, ptr @[[GLOB1]], ptr null }
;.
define weak void @spmd_callees(i1 %c) #0 {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_callees
; AMDGPU-SAME: (i1 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; AMDGPU-NEXT:    call void @spmd_callees__debug(i1 [[C]])
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_callees
; NVPTX-SAME: (i1 [[C:%.*]]) #[[ATTR0:[0-9]+]] {
; NVPTX-NEXT:    call void @spmd_callees__debug(i1 [[C]])
; NVPTX-NEXT:    ret void
;
  call void @spmd_callees__debug(i1 %c)
  ret void
}

define internal void @spmd_callees__debug(i1 %c) {
; AMDGPU-LABEL: define {{[^@]+}}@spmd_callees__debug
; AMDGPU-SAME: (i1 [[C:%.*]]) #[[ATTR1:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10:[0-9]+]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17:![0-9]+]]
; AMDGPU-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable1, ptr @__omp_outlined_spmd_amenable2
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable2
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; AMDGPU:       3:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable2(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7:%.*]]
; AMDGPU:       4:
; AMDGPU-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; AMDGPU:       5:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7]]
; AMDGPU:       6:
; AMDGPU-NEXT:    unreachable
; AMDGPU:       7:
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_callees__debug
; NVPTX-SAME: (i1 [[C:%.*]]) #[[ATTR1:[0-9]+]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_kernel_environment, ptr null)
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10:[0-9]+]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17:![0-9]+]]
; NVPTX-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable1, ptr @__omp_outlined_spmd_amenable2
; NVPTX-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable2
; NVPTX-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; NVPTX:       3:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable2(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7:%.*]]
; NVPTX:       4:
; NVPTX-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; NVPTX:       5:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable1(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7]]
; NVPTX:       6:
; NVPTX-NEXT:    unreachable
; NVPTX:       7:
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_callees_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  %fp = select i1 %c, ptr @__omp_outlined_spmd_amenable1, ptr @__omp_outlined_spmd_amenable2
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr) #6
  call void @__kmpc_target_deinit()
  br label %common.ret
}
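
; Both callees of @spmd_callees contain only SPMD-amenable code, so the kernel
; is expected to be moved to SPMD mode and the indirect call through %fp is
; expected to be specialized: the checks above compare the function pointer
; against the possible callees and branch to guarded direct calls.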
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined_spmd_amenable1(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable1
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6:[0-9]+]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable1
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6:[0-9]+]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP21:![0-9]+]]
;
entry:
  %captured_vars_addrs = alloca [0 x ptr], align 8
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  ret void

for.body:                                         ; preds = %for.cond
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__1, ptr @__omp_outlined__1_wrapper, ptr %captured_vars_addrs, i64 0)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !22
}
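
; The parallel region below (and its wrapper) calls @unknown. That does not
; block SPMD-ization here because the call only executes inside
; __kmpc_parallel_51; only the sequential part of the callee has to carry the
; ompx_spmd_amenable assumption.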
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__1(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7:[0-9]+]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    call void @unknown() #[[ATTR7:[0-9]+]]
; NVPTX-NEXT:    ret void
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__1_wrapper(i16 zeroext %0, i32 %1) #3 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__1_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2:[0-9]+]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; NVPTX-NEXT:    call void @__omp_outlined__1(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    ret void
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  call void @__omp_outlined__1(ptr %.addr1, ptr %.zero.addr) #6
  ret void
}
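
; @__omp_outlined_spmd_amenable2 pairs __kmpc_alloc_shared with
; __kmpc_free_shared. Once the kernel runs in SPMD mode the pair is expected
; to become a plain stack allocation (an addrspace(5) alloca on AMDGPU, see
; [[X_H2S]] in the checks below).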
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined_spmd_amenable2(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable2
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[X_H2S:%.*]] = alloca i8, i64 4, align 4, addrspace(5)
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; AMDGPU-NEXT:    [[MALLOC_CAST:%.*]] = addrspacecast ptr addrspace(5) [[X_H2S]] to ptr
; AMDGPU-NEXT:    call void @use(ptr nocapture [[MALLOC_CAST]]) #[[ATTR6]]
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable2
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[X_H2S:%.*]] = alloca i8, i64 4, align 4
; NVPTX-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [0 x ptr], align 8
; NVPTX-NEXT:    call void @use(ptr nocapture [[X_H2S]]) #[[ATTR6]]
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 0)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP24:![0-9]+]]
;
entry:
  %captured_vars_addrs = alloca [0 x ptr], align 8
  %x = call align 4 ptr @__kmpc_alloc_shared(i64 4)
  call void @use(ptr nocapture %x) #10
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  call void @__kmpc_free_shared(ptr %x, i64 4)
  ret void

for.body:                                         ; preds = %for.cond
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__3, ptr @__omp_outlined__3_wrapper, ptr %captured_vars_addrs, i64 0)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !25
}
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__3(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    call void @unknown() #[[ATTR7]]
; NVPTX-NEXT:    ret void
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__3_wrapper(i16 zeroext %0, i32 %1) #3 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__3_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; NVPTX-NEXT:    call void @__omp_outlined__3(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    ret void
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  call void @__omp_outlined__3(ptr %.addr1, ptr %.zero.addr) #6
  ret void
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define weak void @spmd_and_non_spmd_callee(i1 %c) #0 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callee
; AMDGPU-SAME: (i1 [[C:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callee_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU:       is_worker_check:
; AMDGPU-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU:       worker_state_machine.begin:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
; AMDGPU-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; AMDGPU-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.finished:
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker_state_machine.is_active.check:
; AMDGPU-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT:    call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.end:
; AMDGPU-NEXT:    call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU:       worker_state_machine.done.barrier:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU:       thread.user_code.check:
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable3, ptr @__omp_outlined_not_spmd_amenable
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_not_spmd_amenable
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; AMDGPU:       3:
; AMDGPU-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7:%.*]]
; AMDGPU:       4:
; AMDGPU-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; AMDGPU:       5:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable3(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[TMP7]]
; AMDGPU:       6:
; AMDGPU-NEXT:    unreachable
; AMDGPU:       7:
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callee
; NVPTX-SAME: (i1 [[C:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callee_kernel_environment, ptr null)
; NVPTX-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX:       is_worker_check:
; NVPTX-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX:       worker_state_machine.begin:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; NVPTX-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX:       worker_state_machine.finished:
; NVPTX-NEXT:    ret void
; NVPTX:       worker_state_machine.is_active.check:
; NVPTX-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX:       worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT:    call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX:       worker_state_machine.parallel_region.end:
; NVPTX-NEXT:    call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX:       worker_state_machine.done.barrier:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX:       thread.user_code.check:
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    [[FP:%.*]] = select i1 [[C]], ptr @__omp_outlined_spmd_amenable3, ptr @__omp_outlined_not_spmd_amenable
; NVPTX-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_not_spmd_amenable
; NVPTX-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; NVPTX:       3:
; NVPTX-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7:%.*]]
; NVPTX:       4:
; NVPTX-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; NVPTX:       5:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable3(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]]) #[[ATTR10]]
; NVPTX-NEXT:    br label [[TMP7]]
; NVPTX:       6:
; NVPTX-NEXT:    unreachable
; NVPTX:       7:
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callee_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  %fp = select i1 %c, ptr @__omp_outlined_spmd_amenable3, ptr @__omp_outlined_not_spmd_amenable
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr) #6
  call void @__kmpc_target_deinit()
  br label %common.ret
}
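
; @spmd_and_non_spmd_callee above mixes an SPMD-amenable callee (below) with
; @__omp_outlined_not_spmd_amenable. The kernel therefore keeps the
; generic-mode worker state machine, but the indirect call is still
; specialized into guarded direct calls.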
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined_spmd_amenable3(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable3
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
; AMDGPU-NEXT:    [[X:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 4) #[[ATTR10]]
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; AMDGPU-NEXT:    call void @__kmpc_free_shared(ptr [[X]], i64 4) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    store ptr [[X]], ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA25:![0-9]+]]
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable3
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[CAPTURED_VARS_ADDRS:%.*]] = alloca [1 x ptr], align 8
; NVPTX-NEXT:    [[X:%.*]] = call align 4 ptr @__kmpc_alloc_shared(i64 4) #[[ATTR10]]
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; NVPTX-NEXT:    call void @__kmpc_free_shared(ptr [[X]], i64 4) #[[ATTR10]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    store ptr [[X]], ptr [[CAPTURED_VARS_ADDRS]], align 8, !tbaa [[TBAA25:![0-9]+]]
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr [[CAPTURED_VARS_ADDRS]], i64 1)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP27:![0-9]+]]
;
entry:
  %captured_vars_addrs = alloca [1 x ptr], align 8
  %x = call align 4 ptr @__kmpc_alloc_shared(i64 4)
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  call void @__kmpc_free_shared(ptr %x, i64 4)
  ret void

for.body:                                         ; preds = %for.cond
  store ptr %x, ptr %captured_vars_addrs, align 8, !tbaa !26
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__5, ptr @__omp_outlined__5_wrapper, ptr %captured_vars_addrs, i64 1)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !28
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__5(ptr noalias %.global_tid., ptr noalias %.bound_tid., ptr nonnull align 4 dereferenceable(4) %x) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
; AMDGPU-NEXT:    store i32 [[INC]], ptr [[X]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[X]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    [[INC:%.*]] = add nsw i32 [[TMP0]], 1
; NVPTX-NEXT:    store i32 [[INC]], ptr [[X]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @unknown() #[[ATTR7]]
; NVPTX-NEXT:    ret void
;
entry:
  %0 = load i32, ptr %x, align 4, !tbaa !18
  %inc = add nsw i32 %0, 1
  store i32 %inc, ptr %x, align 4, !tbaa !18
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__5_wrapper(i16 zeroext %0, i32 %1) #3 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; AMDGPU-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; AMDGPU-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8
; AMDGPU-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8, !tbaa [[TBAA25]]
; AMDGPU-NEXT:    call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP3]]) #[[ATTR10]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__5_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR2]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTADDR1:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[GLOBAL_ARGS:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    call void @__kmpc_get_shared_variables(ptr [[GLOBAL_ARGS]])
; NVPTX-NEXT:    [[TMP2:%.*]] = load ptr, ptr [[GLOBAL_ARGS]], align 8
; NVPTX-NEXT:    [[TMP3:%.*]] = load ptr, ptr [[TMP2]], align 8, !tbaa [[TBAA25]]
; NVPTX-NEXT:    call void @__omp_outlined__5(ptr [[DOTADDR1]], ptr [[DOTZERO_ADDR]], ptr [[TMP3]]) #[[ATTR10]]
; NVPTX-NEXT:    ret void
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  %2 = load ptr, ptr %global_args, align 8
  %3 = load ptr, ptr %2, align 8, !tbaa !26
  call void @__omp_outlined__5(ptr %.addr1, ptr %.zero.addr, ptr %3) #6
  ret void
}
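
; For the two kernels below the callee is only known through !callees
; metadata. @spmd_callees_metadata is expected to be SPMD-ized with the
; indirect call folded to a direct call, while
; @spmd_and_non_spmd_callees_metadata has a non-amenable potential callee and
; thus keeps the generic state machine with guarded direct calls.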
; Function Attrs: alwaysinline convergent norecurse nounwind
define weak void @spmd_callees_metadata(ptr %fp) #0 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@spmd_callees_metadata
; AMDGPU-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_metadata_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_callees_metadata
; NVPTX-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_callees_metadata_kernel_environment, ptr null)
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_callees_metadata_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr), !callees !31
  call void @__kmpc_target_deinit()
  br label %common.ret
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define weak void @spmd_and_non_spmd_callees_metadata(ptr %fp) #0 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callees_metadata
; AMDGPU-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8, addrspace(5)
; AMDGPU-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; AMDGPU-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callees_metadata_kernel_environment, ptr null)
; AMDGPU-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; AMDGPU:       is_worker_check:
; AMDGPU-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; AMDGPU-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; AMDGPU-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; AMDGPU-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; AMDGPU-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; AMDGPU:       worker_state_machine.begin:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN_ADDR_GENERIC:%.*]] = addrspacecast ptr addrspace(5) [[WORKER_WORK_FN_ADDR]] to ptr
; AMDGPU-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR_GENERIC]])
; AMDGPU-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR_GENERIC]], align 8
; AMDGPU-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; AMDGPU-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; AMDGPU:       worker_state_machine.finished:
; AMDGPU-NEXT:    ret void
; AMDGPU:       worker_state_machine.is_active.check:
; AMDGPU-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.fallback.execute:
; AMDGPU-NEXT:    call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; AMDGPU:       worker_state_machine.parallel_region.end:
; AMDGPU-NEXT:    call void @__kmpc_kernel_end_parallel()
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; AMDGPU:       worker_state_machine.done.barrier:
; AMDGPU-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; AMDGPU-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; AMDGPU:       thread.user_code.check:
; AMDGPU-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; AMDGPU-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; AMDGPU:       common.ret:
; AMDGPU-NEXT:    ret void
; AMDGPU:       user_code.entry:
; AMDGPU-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; AMDGPU-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; AMDGPU-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable_external
; AMDGPU-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; AMDGPU:       3:
; AMDGPU-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; AMDGPU-NEXT:    br label [[TMP7:%.*]]
; AMDGPU:       4:
; AMDGPU-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; AMDGPU:       5:
; AMDGPU-NEXT:    call void @__omp_outlined_not_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; AMDGPU-NEXT:    br label [[TMP7]]
; AMDGPU:       6:
; AMDGPU-NEXT:    unreachable
; AMDGPU:       7:
; AMDGPU-NEXT:    call void @__kmpc_target_deinit()
; AMDGPU-NEXT:    br label [[COMMON_RET]]
;
; NVPTX-LABEL: define {{[^@]+}}@spmd_and_non_spmd_callees_metadata
; NVPTX-SAME: (ptr [[FP:%.*]]) #[[ATTR0]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    [[WORKER_WORK_FN_ADDR:%.*]] = alloca ptr, align 8
; NVPTX-NEXT:    [[DOTZERO_ADDR:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
; NVPTX-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callees_metadata_kernel_environment, ptr null)
; NVPTX-NEXT:    [[THREAD_IS_WORKER:%.*]] = icmp ne i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[THREAD_IS_WORKER]], label [[IS_WORKER_CHECK:%.*]], label [[THREAD_USER_CODE_CHECK:%.*]]
; NVPTX:       is_worker_check:
; NVPTX-NEXT:    [[BLOCK_HW_SIZE:%.*]] = call i32 @__kmpc_get_hardware_num_threads_in_block()
; NVPTX-NEXT:    [[WARP_SIZE:%.*]] = call i32 @__kmpc_get_warp_size()
; NVPTX-NEXT:    [[BLOCK_SIZE:%.*]] = sub i32 [[BLOCK_HW_SIZE]], [[WARP_SIZE]]
; NVPTX-NEXT:    [[THREAD_IS_MAIN_OR_WORKER:%.*]] = icmp slt i32 [[TMP0]], [[BLOCK_SIZE]]
; NVPTX-NEXT:    br i1 [[THREAD_IS_MAIN_OR_WORKER]], label [[WORKER_STATE_MACHINE_BEGIN:%.*]], label [[WORKER_STATE_MACHINE_FINISHED:%.*]]
; NVPTX:       worker_state_machine.begin:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    [[WORKER_IS_ACTIVE:%.*]] = call i1 @__kmpc_kernel_parallel(ptr [[WORKER_WORK_FN_ADDR]])
; NVPTX-NEXT:    [[WORKER_WORK_FN:%.*]] = load ptr, ptr [[WORKER_WORK_FN_ADDR]], align 8
; NVPTX-NEXT:    [[WORKER_IS_DONE:%.*]] = icmp eq ptr [[WORKER_WORK_FN]], null
; NVPTX-NEXT:    br i1 [[WORKER_IS_DONE]], label [[WORKER_STATE_MACHINE_FINISHED]], label [[WORKER_STATE_MACHINE_IS_ACTIVE_CHECK:%.*]]
; NVPTX:       worker_state_machine.finished:
; NVPTX-NEXT:    ret void
; NVPTX:       worker_state_machine.is_active.check:
; NVPTX-NEXT:    br i1 [[WORKER_IS_ACTIVE]], label [[WORKER_STATE_MACHINE_PARALLEL_REGION_FALLBACK_EXECUTE:%.*]], label [[WORKER_STATE_MACHINE_DONE_BARRIER:%.*]]
; NVPTX:       worker_state_machine.parallel_region.fallback.execute:
; NVPTX-NEXT:    call void [[WORKER_WORK_FN]](i16 0, i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_PARALLEL_REGION_END:%.*]]
; NVPTX:       worker_state_machine.parallel_region.end:
; NVPTX-NEXT:    call void @__kmpc_kernel_end_parallel()
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_DONE_BARRIER]]
; NVPTX:       worker_state_machine.done.barrier:
; NVPTX-NEXT:    call void @__kmpc_barrier_simple_generic(ptr @[[GLOB1]], i32 [[TMP0]])
; NVPTX-NEXT:    br label [[WORKER_STATE_MACHINE_BEGIN]]
; NVPTX:       thread.user_code.check:
; NVPTX-NEXT:    [[EXEC_USER_CODE:%.*]] = icmp eq i32 [[TMP0]], -1
; NVPTX-NEXT:    br i1 [[EXEC_USER_CODE]], label [[USER_CODE_ENTRY:%.*]], label [[COMMON_RET:%.*]]
; NVPTX:       common.ret:
; NVPTX-NEXT:    ret void
; NVPTX:       user_code.entry:
; NVPTX-NEXT:    [[TMP1:%.*]] = call i32 @__kmpc_global_thread_num(ptr @[[GLOB1]]) #[[ATTR10]]
; NVPTX-NEXT:    store i32 0, ptr [[DOTZERO_ADDR]], align 4
; NVPTX-NEXT:    store i32 [[TMP1]], ptr [[DOTTHREADID_TEMP_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    [[TMP2:%.*]] = icmp eq ptr [[FP]], @__omp_outlined_spmd_amenable_external
; NVPTX-NEXT:    br i1 [[TMP2]], label [[TMP3:%.*]], label [[TMP4:%.*]]
; NVPTX:       3:
; NVPTX-NEXT:    call void @__omp_outlined_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; NVPTX-NEXT:    br label [[TMP7:%.*]]
; NVPTX:       4:
; NVPTX-NEXT:    br i1 true, label [[TMP5:%.*]], label [[TMP6:%.*]]
; NVPTX:       5:
; NVPTX-NEXT:    call void @__omp_outlined_not_spmd_amenable_external(ptr [[DOTTHREADID_TEMP_]], ptr [[DOTZERO_ADDR]])
; NVPTX-NEXT:    br label [[TMP7]]
; NVPTX:       6:
; NVPTX-NEXT:    unreachable
; NVPTX:       7:
; NVPTX-NEXT:    call void @__kmpc_target_deinit()
; NVPTX-NEXT:    br label [[COMMON_RET]]
;
entry:
  %.zero.addr = alloca i32, align 4
  %.threadid_temp. = alloca i32, align 4
  %0 = call i32 @__kmpc_target_init(ptr @spmd_and_non_spmd_callees_metadata_kernel_environment, ptr null)
  %exec_user_code = icmp eq i32 %0, -1
  br i1 %exec_user_code, label %user_code.entry, label %common.ret

common.ret:                                       ; preds = %entry, %user_code.entry
  ret void

user_code.entry:                                  ; preds = %entry
  %1 = call i32 @__kmpc_global_thread_num(ptr @1)
  store i32 0, ptr %.zero.addr, align 4
  store i32 %1, ptr %.threadid_temp., align 4, !tbaa !18
  call void %fp(ptr %.threadid_temp., ptr %.zero.addr), !callees !32
  call void @__kmpc_target_deinit()
  br label %common.ret
}
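
; The externally visible functions below are the potential targets listed in
; the !callees metadata of the two kernels above.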
; Function Attrs: alwaysinline convergent norecurse nounwind
define void @__omp_outlined_spmd_amenable_external(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable_external
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    br label [[FOR_COND:%.*]]
; AMDGPU:       for.cond:
; AMDGPU-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; AMDGPU-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; AMDGPU-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; AMDGPU:       for.cond.cleanup:
; AMDGPU-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; AMDGPU-NEXT:    ret void
; AMDGPU:       for.body:
; AMDGPU-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; AMDGPU-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr undef, i64 0)
; AMDGPU-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; AMDGPU-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_spmd_amenable_external
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    br label [[FOR_COND:%.*]]
; NVPTX:       for.cond:
; NVPTX-NEXT:    [[I_0:%.*]] = phi i32 [ 0, [[ENTRY:%.*]] ], [ [[INC:%.*]], [[FOR_BODY:%.*]] ]
; NVPTX-NEXT:    [[CMP:%.*]] = icmp slt i32 [[I_0]], 100
; NVPTX-NEXT:    br i1 [[CMP]], label [[FOR_BODY]], label [[FOR_COND_CLEANUP:%.*]]
; NVPTX:       for.cond.cleanup:
; NVPTX-NEXT:    call void @spmd_amenable() #[[ATTR6]]
; NVPTX-NEXT:    ret void
; NVPTX:       for.body:
; NVPTX-NEXT:    [[TMP0:%.*]] = load i32, ptr [[DOTGLOBAL_TID_]], align 4, !tbaa [[TBAA17]]
; NVPTX-NEXT:    call void @__kmpc_parallel_51(ptr @[[GLOB1]], i32 [[TMP0]], i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr undef, i64 0)
; NVPTX-NEXT:    [[INC]] = add nsw i32 [[I_0]], 1
; NVPTX-NEXT:    br label [[FOR_COND]], !llvm.loop [[LOOP28:![0-9]+]]
;
entry:
  br label %for.cond

for.cond:                                         ; preds = %for.body, %entry
  %i.0 = phi i32 [ 0, %entry ], [ %inc, %for.body ]
  %cmp = icmp slt i32 %i.0, 100
  br i1 %cmp, label %for.body, label %for.cond.cleanup

for.cond.cleanup:                                 ; preds = %for.cond
  call void @spmd_amenable() #10
  ret void

for.body:                                         ; preds = %for.cond
  %0 = load i32, ptr %.global_tid., align 4, !tbaa !18
  call void @__kmpc_parallel_51(ptr @1, i32 %0, i32 1, i32 -1, i32 -1, ptr @__omp_outlined__7, ptr @__omp_outlined__7_wrapper, ptr undef, i64 0)
  %inc = add nsw i32 %i.0, 1
  br label %for.cond, !llvm.loop !29
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__7(ptr noalias %.global_tid., ptr noalias %.bound_tid., ptr nonnull align 4 dereferenceable(4) %x) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]], ptr nonnull align 4 dereferenceable(4) [[X:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    ret void
;
entry:
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__7_wrapper(i16 zeroext %0, i32 %1) #3 {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; AMDGPU-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined__7_wrapper
; NVPTX-SAME: (i16 zeroext [[TMP0:%.*]], i32 [[TMP1:%.*]]) #[[ATTR3:[0-9]+]] {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    ret void
;
entry:
  ret void
}

; Function Attrs: alwaysinline convergent norecurse nounwind
define void @__omp_outlined_not_spmd_amenable_external(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable_external
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTGLOBAL_TID_]], ptr [[DOTBOUND_TID_]])
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable_external
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:    call void @__omp_outlined_not_spmd_amenable(ptr [[DOTGLOBAL_TID_]], ptr [[DOTBOUND_TID_]])
; NVPTX-NEXT:    ret void
;
  call void @__omp_outlined_not_spmd_amenable(ptr %.global_tid., ptr %.bound_tid.);
  ret void
}

define internal void @__omp_outlined_not_spmd_amenable(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable
; AMDGPU-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; AMDGPU-NEXT:  entry:
; AMDGPU-NEXT:    call void @unknown() #[[ATTR7]]
; AMDGPU-NEXT:    ret void
;
; NVPTX-LABEL: define {{[^@]+}}@__omp_outlined_not_spmd_amenable
; NVPTX-SAME: (ptr noalias [[DOTGLOBAL_TID_:%.*]], ptr noalias [[DOTBOUND_TID_:%.*]]) {
; NVPTX-NEXT:  entry:
; NVPTX-NEXT:    call void @unknown() #[[ATTR7]]
; NVPTX-NEXT:    ret void
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: nosync nounwind
declare void @__kmpc_free_shared(ptr nocapture, i64) #8

; Function Attrs: nofree nosync nounwind
declare ptr @__kmpc_alloc_shared(i64) #7

; Function Attrs: convergent
declare void @use(ptr nocapture) #5

; Function Attrs: convergent
declare void @unknown() #2

declare void @unknowni32p(ptr) #2

; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
declare void @llvm.lifetime.start.p0(i64 immarg, ptr nocapture) #1

; Make it a weak definition so we will apply custom state machine rewriting but can't use the body in the reasoning.
define weak i32 @__kmpc_target_init(ptr, ptr) {
;
;
; AMDGPU-LABEL: define {{[^@]+}}@__kmpc_target_init
; AMDGPU-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
; AMDGPU-NEXT:    ret i32 0
;
; NVPTX-LABEL: define {{[^@]+}}@__kmpc_target_init
; NVPTX-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]]) {
; NVPTX-NEXT:    ret i32 0
;
  ret i32 0
}

declare void @__kmpc_get_shared_variables(ptr)

; Function Attrs: alwaysinline
declare void @__kmpc_parallel_51(ptr, i32, i32, i32, i32, ptr, ptr, ptr, i64) #4

; Function Attrs: argmemonly mustprogress nofree nosync nounwind willreturn
declare void @llvm.lifetime.end.p0(i64 immarg, ptr nocapture) #1

; Function Attrs: convergent
declare void @spmd_amenable() #5

; Function Attrs: nounwind
declare i32 @__kmpc_global_thread_num(ptr) #6

declare void @__kmpc_target_deinit()
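
; Note: @__omp_outlined__9 and its wrapper are not referenced by any kernel
; in this file, so no check lines are generated for them.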
; Function Attrs: alwaysinline convergent norecurse nounwind
define internal void @__omp_outlined__9(ptr noalias %.global_tid., ptr noalias %.bound_tid.) {
;
;
entry:
  call void @unknown() #11
  ret void
}

; Function Attrs: convergent norecurse nounwind
define internal void @__omp_outlined__9_wrapper(i16 zeroext %0, i32 %1) #3 {
;
;
entry:
  %.addr1 = alloca i32, align 4
  %.zero.addr = alloca i32, align 4
  %global_args = alloca ptr, align 8
  store i32 %1, ptr %.addr1, align 4, !tbaa !18
  store i32 0, ptr %.zero.addr, align 4
  call void @__kmpc_get_shared_variables(ptr %global_args)
  call void @__omp_outlined__9(ptr %.addr1, ptr %.zero.addr) #6
  ret void
}

declare fastcc i32 @__kmpc_get_hardware_thread_id_in_block();

attributes #0 = { alwaysinline convergent norecurse nounwind "kernel" }
attributes #1 = { argmemonly mustprogress nofree nosync nounwind willreturn }
attributes #2 = { convergent }
attributes #3 = { convergent norecurse nounwind }
attributes #4 = { alwaysinline }
attributes #5 = { convergent "llvm.assume"="ompx_spmd_amenable" }
attributes #6 = { nounwind }
attributes #7 = { nofree nosync nounwind }
attributes #8 = { nosync nounwind }
attributes #9 = { alwaysinline convergent nounwind }
attributes #10 = { convergent "llvm.assume"="ompx_spmd_amenable" }
attributes #11 = { convergent }

!omp_offload.info = !{!0, !1, !2, !3, !4, !5}
!nvvm.annotations = !{!6, !7, !8, !9, !10, !11}
!llvm.module.flags = !{!12, !13, !14, !15, !16}
!llvm.ident = !{!17}

!0 = !{i32 0, i32 64770, i32 541341486, !"", i32 74, i32 5}
!1 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1}
!2 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop", i32 5, i32 0}
!3 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var", i32 35, i32 2}
!4 = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_target", i32 65, i32 4}
!5 = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var_guarded", i32 50, i32 3}
!6 = !{ptr @spmd_callees, !"kernel", i32 1}
!7 = !{ptr @spmd_and_non_spmd_callees_metadata, !"kernel", i32 1}
!8 = !{ptr @spmd_and_non_spmd_callee, !"kernel", i32 1}
!9 = !{ptr @spmd_callees_metadata, !"kernel", i32 1}
!10 = !{i32 1}
!11 = !{i32 1}
!12 = !{i32 1, !"wchar_size", i32 4}
!13 = !{i32 7, !"openmp", i32 50}
!14 = !{i32 7, !"openmp-device", i32 50}
!15 = !{i32 8, !"PIC Level", i32 2}
!16 = !{i32 7, !"frame-pointer", i32 2}
!17 = !{!"clang version 14.0.0"}
!18 = !{!19, !19, i64 0}
!19 = !{!"int", !20, i64 0}
!20 = !{!"omnipotent char", !21, i64 0}
!21 = !{!"Simple C/C++ TBAA"}
!22 = distinct !{!22, !23, !24}
!23 = !{!"llvm.loop.mustprogress"}
!24 = !{!"llvm.loop.unroll.disable"}
!25 = distinct !{!25, !23, !24}
!26 = !{!27, !27, i64 0}
!27 = !{!"any pointer", !20, i64 0}
!28 = distinct !{!28, !23, !24}
!29 = distinct !{!29, !23, !24}
!30 = !{!31, !27, i64 0}
!31 = !{ptr @__omp_outlined_spmd_amenable_external, ptr @__omp_outlined_not_spmd_amenable}
!32 = !{ptr @__omp_outlined_spmd_amenable_external, ptr @__omp_outlined_not_spmd_amenable_external}
;.
; AMDGPU: attributes #[[ATTR0]] = { alwaysinline convergent norecurse nounwind "kernel" }
; AMDGPU: attributes #[[ATTR1]] = { norecurse }
; AMDGPU: attributes #[[ATTR2]] = { convergent norecurse nounwind }
; AMDGPU: attributes #[[ATTR3]] = { norecurse nounwind }
; AMDGPU: attributes #[[ATTR4:[0-9]+]] = { nosync nounwind }
; AMDGPU: attributes #[[ATTR5:[0-9]+]] = { nofree nosync nounwind allocsize(0) }
; AMDGPU: attributes #[[ATTR6]] = { convergent "llvm.assume"="ompx_spmd_amenable" }
; AMDGPU: attributes #[[ATTR7]] = { convergent }
; AMDGPU: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
; AMDGPU: attributes #[[ATTR9:[0-9]+]] = { alwaysinline }
; AMDGPU: attributes #[[ATTR10]] = { nounwind }
; AMDGPU: attributes #[[ATTR11:[0-9]+]] = { convergent nounwind }
;.
; NVPTX: attributes #[[ATTR0]] = { alwaysinline convergent norecurse nounwind "kernel" }
; NVPTX: attributes #[[ATTR1]] = { norecurse }
; NVPTX: attributes #[[ATTR2]] = { convergent norecurse nounwind }
; NVPTX: attributes #[[ATTR3]] = { norecurse nounwind }
; NVPTX: attributes #[[ATTR4:[0-9]+]] = { nosync nounwind }
; NVPTX: attributes #[[ATTR5:[0-9]+]] = { nofree nosync nounwind allocsize(0) }
; NVPTX: attributes #[[ATTR6]] = { convergent "llvm.assume"="ompx_spmd_amenable" }
; NVPTX: attributes #[[ATTR7]] = { convergent }
; NVPTX: attributes #[[ATTR8:[0-9]+]] = { nocallback nofree nosync nounwind willreturn memory(argmem: readwrite) }
; NVPTX: attributes #[[ATTR9:[0-9]+]] = { alwaysinline }
; NVPTX: attributes #[[ATTR10]] = { nounwind }
; NVPTX: attributes #[[ATTR11:[0-9]+]] = { convergent nounwind }
;.
; AMDGPU: [[META0:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"", i32 74, i32 5}
; AMDGPU: [[META1:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1}
; AMDGPU: [[META2:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop", i32 5, i32 0}
; AMDGPU: [[META3:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var", i32 35, i32 2}
; AMDGPU: [[META4:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_target", i32 65, i32 4}
; AMDGPU: [[META5:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var_guarded", i32 50, i32 3}
; AMDGPU: [[META6:![0-9]+]] = !{ptr @spmd_callees, !"kernel", i32 1}
; AMDGPU: [[META7:![0-9]+]] = !{ptr @spmd_and_non_spmd_callees_metadata, !"kernel", i32 1}
; AMDGPU: [[META8:![0-9]+]] = !{ptr @spmd_and_non_spmd_callee, !"kernel", i32 1}
; AMDGPU: [[META9:![0-9]+]] = !{ptr @spmd_callees_metadata, !"kernel", i32 1}
; AMDGPU: [[META10:![0-9]+]] = !{i32 1}
; AMDGPU: [[META11:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
; AMDGPU: [[META12:![0-9]+]] = !{i32 7, !"openmp", i32 50}
; AMDGPU: [[META13:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
; AMDGPU: [[META14:![0-9]+]] = !{i32 8, !"PIC Level", i32 2}
; AMDGPU: [[META15:![0-9]+]] = !{i32 7, !"frame-pointer", i32 2}
; AMDGPU: [[META16:![0-9]+]] = !{!"clang version 14.0.0"}
; AMDGPU: [[TBAA17]] = !{!18, !18, i64 0}
; AMDGPU: [[META18:![0-9]+]] = !{!"int", !19, i64 0}
; AMDGPU: [[META19:![0-9]+]] = !{!"omnipotent char", !20, i64 0}
; AMDGPU: [[META20:![0-9]+]] = !{!"Simple C/C++ TBAA"}
; AMDGPU: [[LOOP21]] = distinct !{!21, !22, !23}
; AMDGPU: [[META22:![0-9]+]] = !{!"llvm.loop.mustprogress"}
; AMDGPU: [[META23:![0-9]+]] = !{!"llvm.loop.unroll.disable"}
; AMDGPU: [[LOOP24]] = distinct !{!24, !22, !23}
; AMDGPU: [[TBAA25]] = !{!26, !26, i64 0}
; AMDGPU: [[META26:![0-9]+]] = !{!"any pointer", !19, i64 0}
; AMDGPU: [[LOOP27]] = distinct !{!27, !22, !23}
; AMDGPU: [[LOOP28]] = distinct !{!28, !22, !23}
;.
; NVPTX: [[META0:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"", i32 74, i32 5}
; NVPTX: [[META1:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_stack_var", i32 20, i32 1}
; NVPTX: [[META2:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop", i32 5, i32 0}
; NVPTX: [[META3:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var", i32 35, i32 2}
; NVPTX: [[META4:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"do_not_spmdize_target", i32 65, i32 4}
; NVPTX: [[META5:![0-9]+]] = !{i32 0, i32 64770, i32 541341486, !"sequential_loop_to_shared_var_guarded", i32 50, i32 3}
; NVPTX: [[META6:![0-9]+]] = !{ptr @spmd_callees, !"kernel", i32 1}
; NVPTX: [[META7:![0-9]+]] = !{ptr @spmd_and_non_spmd_callees_metadata, !"kernel", i32 1}
; NVPTX: [[META8:![0-9]+]] = !{ptr @spmd_and_non_spmd_callee, !"kernel", i32 1}
; NVPTX: [[META9:![0-9]+]] = !{ptr @spmd_callees_metadata, !"kernel", i32 1}
; NVPTX: [[META10:![0-9]+]] = !{i32 1}
; NVPTX: [[META11:![0-9]+]] = !{i32 1, !"wchar_size", i32 4}
; NVPTX: [[META12:![0-9]+]] = !{i32 7, !"openmp", i32 50}
; NVPTX: [[META13:![0-9]+]] = !{i32 7, !"openmp-device", i32 50}
; NVPTX: [[META14:![0-9]+]] = !{i32 8, !"PIC Level", i32 2}
; NVPTX: [[META15:![0-9]+]] = !{i32 7, !"frame-pointer", i32 2}
; NVPTX: [[META16:![0-9]+]] = !{!"clang version 14.0.0"}
; NVPTX: [[TBAA17]] = !{!18, !18, i64 0}
; NVPTX: [[META18:![0-9]+]] = !{!"int", !19, i64 0}
; NVPTX: [[META19:![0-9]+]] = !{!"omnipotent char", !20, i64 0}
; NVPTX: [[META20:![0-9]+]] = !{!"Simple C/C++ TBAA"}
; NVPTX: [[LOOP21]] = distinct !{!21, !22, !23}
; NVPTX: [[META22:![0-9]+]] = !{!"llvm.loop.mustprogress"}
; NVPTX: [[META23:![0-9]+]] = !{!"llvm.loop.unroll.disable"}
; NVPTX: [[LOOP24]] = distinct !{!24, !22, !23}
; NVPTX: [[TBAA25]] = !{!26, !26, i64 0}
; NVPTX: [[META26:![0-9]+]] = !{!"any pointer", !19, i64 0}
; NVPTX: [[LOOP27]] = distinct !{!27, !22, !23}
; NVPTX: [[LOOP28]] = distinct !{!28, !22, !23}
;.