// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple s390x-linux-gnu -O1 -emit-llvm %s -o - | FileCheck %s
//
// Test GNU atomic builtins for int64_t.

#include <stdatomic.h>
#include <stdint.h>

// CHECK-LABEL: @f1(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f1(int64_t *Ptr) {
  return __atomic_load_n(Ptr, memory_order_seq_cst);
}

// CHECK-LABEL: @f2(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load atomic i64, ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP0]], ptr [[RET:%.*]], align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f2(int64_t *Ptr, int64_t *Ret) {
  __atomic_load(Ptr, Ret, memory_order_seq_cst);
  return *Ret;
}

// CHECK-LABEL: @f3(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    store atomic i64 [[VAL:%.*]], ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret void
//
void f3(int64_t *Ptr, int64_t Val) {
  __atomic_store_n(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f4(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
// CHECK-NEXT:    store atomic i64 [[TMP0]], ptr [[PTR:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret void
//
void f4(int64_t *Ptr, int64_t *Val) {
  __atomic_store(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f5(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f5(int64_t *Ptr, int64_t Val) {
  return __atomic_exchange_n(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f6(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[VAL:%.*]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = atomicrmw xchg ptr [[PTR:%.*]], i64 [[TMP0]] seq_cst, align 8
// CHECK-NEXT:    store i64 [[TMP1]], ptr [[RET:%.*]], align 8
// CHECK-NEXT:    ret i64 [[TMP1]]
//
int64_t f6(int64_t *Ptr, int64_t *Val, int64_t *Ret) {
  __atomic_exchange(Ptr, Val, Ret, memory_order_seq_cst);
  return *Ret;
}

// CHECK-LABEL: @f7(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[DES:%.*]] seq_cst seq_cst, align 8
// CHECK-NEXT:    [[TMP2:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
// CHECK-NEXT:    br i1 [[TMP2]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK:       cmpxchg.store_expected:
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
// CHECK-NEXT:    store i64 [[TMP3]], ptr [[EXP]], align 8
// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
// CHECK:       cmpxchg.continue:
// CHECK-NEXT:    ret i1 [[TMP2]]
//
_Bool f7(int64_t *Ptr, int64_t *Exp, int64_t Des) {
  return __atomic_compare_exchange_n(Ptr, Exp, Des, 0,
                                     memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = load i64, ptr [[EXP:%.*]], align 8
// CHECK-NEXT:    [[TMP1:%.*]] = load i64, ptr [[DES:%.*]], align 8
// CHECK-NEXT:    [[TMP2:%.*]] = cmpxchg ptr [[PTR:%.*]], i64 [[TMP0]], i64 [[TMP1]] seq_cst seq_cst, align 8
// CHECK-NEXT:    [[TMP3:%.*]] = extractvalue { i64, i1 } [[TMP2]], 1
// CHECK-NEXT:    br i1 [[TMP3]], label [[CMPXCHG_CONTINUE:%.*]], label [[CMPXCHG_STORE_EXPECTED:%.*]]
// CHECK:       cmpxchg.store_expected:
// CHECK-NEXT:    [[TMP4:%.*]] = extractvalue { i64, i1 } [[TMP2]], 0
// CHECK-NEXT:    store i64 [[TMP4]], ptr [[EXP]], align 8
// CHECK-NEXT:    br label [[CMPXCHG_CONTINUE]]
// CHECK:       cmpxchg.continue:
// CHECK-NEXT:    ret i1 [[TMP3]]
//
_Bool f8(int64_t *Ptr, int64_t *Exp, int64_t *Des) {
  return __atomic_compare_exchange(Ptr, Exp, Des, 0,
                                   memory_order_seq_cst, memory_order_seq_cst);
}

// CHECK-LABEL: @f9(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = add i64 [[TMP0]], [[VAL]]
// CHECK-NEXT:    ret i64 [[TMP1]]
//
int64_t f9(int64_t *Ptr, int64_t Val) {
  return __atomic_add_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f10(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = sub i64 [[TMP0]], [[VAL]]
// CHECK-NEXT:    ret i64 [[TMP1]]
//
int64_t f10(int64_t *Ptr, int64_t Val) {
  return __atomic_sub_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f11(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
// CHECK-NEXT:    ret i64 [[TMP1]]
//
int64_t f11(int64_t *Ptr, int64_t Val) {
  return __atomic_and_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f12(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[TMP0]], [[VAL]]
// CHECK-NEXT:    ret i64 [[TMP1]]
//
int64_t f12(int64_t *Ptr, int64_t Val) {
  return __atomic_xor_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f13(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[TMP0]], [[VAL]]
// CHECK-NEXT:    ret i64 [[TMP1]]
//
int64_t f13(int64_t *Ptr, int64_t Val) {
  return __atomic_or_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f14(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[TMP0]], [[VAL]]
// CHECK-NEXT:    [[TMP2:%.*]] = xor i64 [[TMP1]], -1
// CHECK-NEXT:    ret i64 [[TMP2]]
//
int64_t f14(int64_t *Ptr, int64_t Val) {
  return __atomic_nand_fetch(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f15(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw add ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f15(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_add(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw sub ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f16(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_sub(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f17(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw and ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f17(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_and(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f18(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw xor ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f18(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_xor(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f19(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw or ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f19(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_or(Ptr, Val, memory_order_seq_cst);
}

// CHECK-LABEL: @f20(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = atomicrmw nand ptr [[PTR:%.*]], i64 [[VAL:%.*]] seq_cst, align 8
// CHECK-NEXT:    ret i64 [[TMP0]]
//
int64_t f20(int64_t *Ptr, int64_t Val) {
  return __atomic_fetch_nand(Ptr, Val, memory_order_seq_cst);
}