; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
; RUN: opt -S -passes=verify,iroutliner -ir-outlining-no-cost < %s | FileCheck %s

; This test checks that we successfully outline the identical var arg
; intrinsic calls (llvm.va_copy and llvm.va_end), but not the va_arg
; instruction itself.

declare void @llvm.va_start(ptr)
declare void @llvm.va_copy(ptr, ptr)
declare void @llvm.va_end(ptr)

define i32 @func1(i32 %a, double %b, ptr %v, ...) nounwind {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca double, align 8
  %ap = alloca ptr, align 4
  %c = alloca i32, align 4
  store i32 %a, ptr %a.addr, align 4
  store double %b, ptr %b.addr, align 8
  call void @llvm.va_start(ptr %ap)
  %0 = va_arg ptr %ap, i32
  call void @llvm.va_copy(ptr %v, ptr %ap)
  call void @llvm.va_end(ptr %ap)
  store i32 %0, ptr %c, align 4
  %tmp = load i32, ptr %c, align 4
  ret i32 %tmp
}

define i32 @func2(i32 %a, double %b, ptr %v, ...) nounwind {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca double, align 8
  %ap = alloca ptr, align 4
  %c = alloca i32, align 4
  store i32 %a, ptr %a.addr, align 4
  store double %b, ptr %b.addr, align 8
  call void @llvm.va_start(ptr %ap)
  %0 = va_arg ptr %ap, i32
  call void @llvm.va_copy(ptr %v, ptr %ap)
  call void @llvm.va_end(ptr %ap)
  store i32 %0, ptr %c, align 4
  %tmp = load i32, ptr %c, align 4
  ret i32 %tmp
}
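; A rough C-level equivalent of @func1/@func2 above, shown only as a sketch
; (hypothetical source for illustration; this IR was not necessarily generated
; from it, and va_list representation details vary by target):
;
;   int func1(int a, double b, va_list *v, ...) {
;     va_list ap;
;     va_start(ap, v);          /* stays in the caller after outlining */
;     int c = va_arg(ap, int);  /* va_arg also stays in the caller */
;     va_copy(*v, ap);          /* outlined into @outlined_ir_func_0 */
;     va_end(ap);               /* outlined into @outlined_ir_func_0 */
;     return c;
;   }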
; CHECK-LABEL: define {{[^@]+}}@func1
; CHECK-SAME: (i32 [[A:%.*]], double [[B:%.*]], ptr [[V:%.*]], ...) #[[ATTR1:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT:    [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[A]], ptr [[A_ADDR]], align 4
; CHECK-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT:    call void @llvm.va_start(ptr [[AP]])
; CHECK-NEXT:    [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT:    [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    ret i32 [[TMP_RELOAD]]
;
;
; CHECK-LABEL: define {{[^@]+}}@func2
; CHECK-SAME: (i32 [[A:%.*]], double [[B:%.*]], ptr [[V:%.*]], ...) #[[ATTR1]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[TMP_LOC:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[A_ADDR:%.*]] = alloca i32, align 4
; CHECK-NEXT:    [[B_ADDR:%.*]] = alloca double, align 8
; CHECK-NEXT:    [[AP:%.*]] = alloca ptr, align 4
; CHECK-NEXT:    [[C:%.*]] = alloca i32, align 4
; CHECK-NEXT:    store i32 [[A]], ptr [[A_ADDR]], align 4
; CHECK-NEXT:    store double [[B]], ptr [[B_ADDR]], align 8
; CHECK-NEXT:    call void @llvm.va_start(ptr [[AP]])
; CHECK-NEXT:    [[TMP0:%.*]] = va_arg ptr [[AP]], i32
; CHECK-NEXT:    call void @llvm.lifetime.start.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    call void @outlined_ir_func_0(ptr [[V]], ptr [[AP]], i32 [[TMP0]], ptr [[C]], ptr [[TMP_LOC]])
; CHECK-NEXT:    [[TMP_RELOAD:%.*]] = load i32, ptr [[TMP_LOC]], align 4
; CHECK-NEXT:    call void @llvm.lifetime.end.p0(i64 -1, ptr [[TMP_LOC]])
; CHECK-NEXT:    ret i32 [[TMP_RELOAD]]
;
;
; CHECK-LABEL: define {{[^@]+}}@outlined_ir_func_0
; CHECK-SAME: (ptr [[TMP0:%.*]], ptr [[TMP1:%.*]], i32 [[TMP2:%.*]], ptr [[TMP3:%.*]], ptr [[TMP4:%.*]]) #[[ATTR3:[0-9]+]] {
; CHECK-NEXT:  newFuncRoot:
; CHECK-NEXT:    br label [[ENTRY_TO_OUTLINE:%.*]]
; CHECK:       entry_to_outline:
; CHECK-NEXT:    call void @llvm.va_copy(ptr [[TMP0]], ptr [[TMP1]])
; CHECK-NEXT:    call void @llvm.va_end(ptr [[TMP1]])
; CHECK-NEXT:    store i32 [[TMP2]], ptr [[TMP3]], align 4
; CHECK-NEXT:    [[TMP:%.*]] = load i32, ptr [[TMP3]], align 4
; CHECK-NEXT:    br label [[ENTRY_AFTER_OUTLINE_EXITSTUB:%.*]]
; CHECK:       entry_after_outline.exitStub:
; CHECK-NEXT:    store i32 [[TMP]], ptr [[TMP4]], align 4
; CHECK-NEXT:    ret void
;