// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -mvscale-min=1 -mvscale-max=1 -S -O1 -emit-llvm -o - %s -fhalf-no-semantic-interposition | FileCheck %s --check-prefix=CHECK-128
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -target-feature +bf16 -mvscale-min=4 -mvscale-max=4 -S -O1 -emit-llvm -o - %s -fhalf-no-semantic-interposition | FileCheck %s --check-prefix=CHECK-512

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N __ARM_FEATURE_SVE_BITS

typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));
typedef svbfloat16_t fixed_bfloat16_t __attribute__((arm_sve_vector_bits(N)));
typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

fixed_int64_t global_i64;
fixed_bfloat16_t global_bf16;
fixed_bool_t global_bool;

//===----------------------------------------------------------------------===//
// WRITES
//===----------------------------------------------------------------------===//

// CHECK-128-LABEL: @write_global_i64(
// CHECK-128-NEXT:  entry:
// CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-128-NEXT:    store <2 x i64> [[CASTFIXEDSVE]], ptr @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-128-NEXT:    ret void
//
// CHECK-512-LABEL: @write_global_i64(
// CHECK-512-NEXT:  entry:
// CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[V:%.*]], i64 0)
// CHECK-512-NEXT:    store <8 x i64> [[CASTFIXEDSVE]], ptr @global_i64, align 16, !tbaa [[TBAA6:![0-9]+]]
// CHECK-512-NEXT:    ret void
//
void write_global_i64(svint64_t v) { global_i64 = v; }

// CHECK-128-LABEL: @write_global_bf16(
// CHECK-128-NEXT:  entry:
// CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <8 x bfloat> @llvm.vector.extract.v8bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
// CHECK-128-NEXT:    store <8 x bfloat> [[CASTFIXEDSVE]], ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT:    ret void
//
// CHECK-512-LABEL: @write_global_bf16(
// CHECK-512-NEXT:  entry:
// CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <32 x bfloat> @llvm.vector.extract.v32bf16.nxv8bf16(<vscale x 8 x bfloat> [[V:%.*]], i64 0)
// CHECK-512-NEXT:    store <32 x bfloat> [[CASTFIXEDSVE]], ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT:    ret void
//
void write_global_bf16(svbfloat16_t v) { global_bf16 = v; }

// CHECK-128-LABEL: @write_global_bool(
// CHECK-128-NEXT:  entry:
// CHECK-128-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
// CHECK-128-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <2 x i8> @llvm.vector.extract.v2i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT:    store <2 x i8> [[CASTFIXEDSVE]], ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-128-NEXT:    ret void
//
// CHECK-512-LABEL: @write_global_bool(
// CHECK-512-NEXT:  entry:
// CHECK-512-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1> [[V:%.*]] to <vscale x 2 x i8>
// CHECK-512-NEXT:    [[CASTFIXEDSVE:%.*]] = tail call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[TMP0]], i64 0)
// CHECK-512-NEXT:    store <8 x i8> [[CASTFIXEDSVE]], ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-512-NEXT:    ret void
//
void write_global_bool(svbool_t v) { global_bool = v; }

//===----------------------------------------------------------------------===//
// READS
//===----------------------------------------------------------------------===//

// CHECK-128-LABEL: @read_global_i64(
// CHECK-128-NEXT:  entry:
// CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i64>, ptr @global_i64, align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v2i64(<vscale x 2 x i64> undef, <2 x i64> [[TMP0]], i64 0)
// CHECK-128-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
// CHECK-512-LABEL: @read_global_i64(
// CHECK-512-NEXT:  entry:
// CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i64>, ptr @global_i64, align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[TMP0]], i64 0)
// CHECK-512-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
svint64_t read_global_i64() { return global_i64; }

// CHECK-128-LABEL: @read_global_bf16(
// CHECK-128-NEXT:  entry:
// CHECK-128-NEXT:    [[TMP0:%.*]] = load <8 x bfloat>, ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-128-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v8bf16(<vscale x 8 x bfloat> undef, <8 x bfloat> [[TMP0]], i64 0)
// CHECK-128-NEXT:    ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
//
// CHECK-512-LABEL: @read_global_bf16(
// CHECK-512-NEXT:  entry:
// CHECK-512-NEXT:    [[TMP0:%.*]] = load <32 x bfloat>, ptr @global_bf16, align 16, !tbaa [[TBAA6]]
// CHECK-512-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.insert.nxv8bf16.v32bf16(<vscale x 8 x bfloat> undef, <32 x bfloat> [[TMP0]], i64 0)
// CHECK-512-NEXT:    ret <vscale x 8 x bfloat> [[CASTSCALABLESVE]]
//
svbfloat16_t read_global_bf16() { return global_bf16; }

// CHECK-128-LABEL: @read_global_bool(
// CHECK-128-NEXT:  entry:
// CHECK-128-NEXT:    [[TMP0:%.*]] = load <2 x i8>, ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-128-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v2i8(<vscale x 2 x i8> undef, <2 x i8> [[TMP0]], i64 0)
// CHECK-128-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-128-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
//
// CHECK-512-LABEL: @read_global_bool(
// CHECK-512-NEXT:  entry:
// CHECK-512-NEXT:    [[TMP0:%.*]] = load <8 x i8>, ptr @global_bool, align 2, !tbaa [[TBAA6]]
// CHECK-512-NEXT:    [[CASTSCALABLESVE:%.*]] = tail call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[TMP0]], i64 0)
// CHECK-512-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-512-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
//
svbool_t read_global_bool() { return global_bool; }