// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN:   -disable-O0-optnone -mvscale-min=4 -mvscale-max=4 \
// RUN:   -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#define N 512

typedef svint8_t fixed_int8_t __attribute__((arm_sve_vector_bits(N)));
typedef svint16_t fixed_int16_t __attribute__((arm_sve_vector_bits(N)));
typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(N)));
typedef svint64_t fixed_int64_t __attribute__((arm_sve_vector_bits(N)));

typedef svuint8_t fixed_uint8_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint16_t fixed_uint16_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint32_t fixed_uint32_t __attribute__((arm_sve_vector_bits(N)));
typedef svuint64_t fixed_uint64_t __attribute__((arm_sve_vector_bits(N)));

typedef svfloat16_t fixed_float16_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat32_t fixed_float32_t __attribute__((arm_sve_vector_bits(N)));
typedef svfloat64_t fixed_float64_t __attribute__((arm_sve_vector_bits(N)));

typedef svbool_t fixed_bool_t __attribute__((arm_sve_vector_bits(N)));

// AND

// CHECK-LABEL: @and_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT:    [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <8 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[AND]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t and_bool(fixed_bool_t a, fixed_bool_t b) { return a & b; }

// CHECK-LABEL: @and_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t and_i8(fixed_int8_t a, fixed_int8_t b) { return a & b; }

// CHECK-LABEL: @and_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t and_i16(fixed_int16_t a, fixed_int16_t b) { return a & b; }

// CHECK-LABEL: @and_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t and_i32(fixed_int32_t a, fixed_int32_t b) { return a & b; }
// CHECK-LABEL: @and_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t and_i64(fixed_int64_t a, fixed_int64_t b) { return a & b; }

// CHECK-LABEL: @and_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t and_u8(fixed_uint8_t a, fixed_uint8_t b) { return a & b; }

// CHECK-LABEL: @and_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t and_u16(fixed_uint16_t a, fixed_uint16_t b) { return a & b; }

// CHECK-LABEL: @and_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t and_u32(fixed_uint32_t a, fixed_uint32_t b) { return a & b; }

// CHECK-LABEL: @and_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[AND:%.*]] = and <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[AND]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t and_u64(fixed_uint64_t a, fixed_uint64_t b) { return a & b; }

// OR

// CHECK-LABEL: @or_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT:    [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <8 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[OR]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t or_bool(fixed_bool_t a, fixed_bool_t b) { return a | b; }

// CHECK-LABEL: @or_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t or_i8(fixed_int8_t a, fixed_int8_t b) { return a | b; }

// CHECK-LABEL: @or_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t or_i16(fixed_int16_t a, fixed_int16_t b) { return a | b; }

// CHECK-LABEL: @or_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t or_i32(fixed_int32_t a, fixed_int32_t b) { return a | b; }

// CHECK-LABEL: @or_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t or_i64(fixed_int64_t a, fixed_int64_t b) { return a | b; }

// CHECK-LABEL: @or_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t or_u8(fixed_uint8_t a, fixed_uint8_t b) { return a | b; }

// CHECK-LABEL: @or_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t or_u16(fixed_uint16_t a, fixed_uint16_t b) { return a | b; }

// CHECK-LABEL: @or_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t or_u32(fixed_uint32_t a, fixed_uint32_t b) { return a | b; }

// CHECK-LABEL: @or_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[OR:%.*]] = or <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[OR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t or_u64(fixed_uint64_t a, fixed_uint64_t b) { return a | b; }

// XOR

// CHECK-LABEL: @xor_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT:    [[B_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP1:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[B:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[B_COERCE]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <8 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[XOR]], i64 0)
// CHECK-NEXT:    [[TMP2:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
//
fixed_bool_t xor_bool(fixed_bool_t a, fixed_bool_t b) { return a ^ b; }

// CHECK-LABEL: @xor_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t xor_i8(fixed_int8_t a, fixed_int8_t b) { return a ^ b; }

// CHECK-LABEL: @xor_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t xor_i16(fixed_int16_t a, fixed_int16_t b) { return a ^ b; }

// CHECK-LABEL: @xor_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t xor_i32(fixed_int32_t a, fixed_int32_t b) { return a ^ b; }

// CHECK-LABEL: @xor_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t xor_i64(fixed_int64_t a, fixed_int64_t b) { return a ^ b; }

// CHECK-LABEL: @xor_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <64 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t xor_u8(fixed_uint8_t a, fixed_uint8_t b) { return a ^ b; }

// CHECK-LABEL: @xor_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <32 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t xor_u16(fixed_uint16_t a, fixed_uint16_t b) { return a ^ b; }

// CHECK-LABEL: @xor_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <16 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t xor_u32(fixed_uint32_t a, fixed_uint32_t b) { return a ^ b; }

// CHECK-LABEL: @xor_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[XOR:%.*]] = xor <8 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[XOR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t xor_u64(fixed_uint64_t a, fixed_uint64_t b) { return a ^ b; }

// NEG

// CHECK-LABEL: @neg_bool(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A_COERCE:%.*]] = bitcast <vscale x 16 x i1> [[TMP0:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT:    [[A:%.*]] = call <8 x i8> @llvm.vector.extract.v8i8.nxv2i8(<vscale x 2 x i8> [[A_COERCE]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <8 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i8> @llvm.vector.insert.nxv2i8.v8i8(<vscale x 2 x i8> undef, <8 x i8> [[NEG]], i64 0)
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <vscale x 2 x i8> [[CASTSCALABLESVE]] to <vscale x 16 x i1>
// CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP1]]
//
fixed_bool_t neg_bool(fixed_bool_t a) { return ~a; }

// CHECK-LABEL: @neg_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <64 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_int8_t neg_i8(fixed_int8_t a) { return ~a; }

// CHECK-LABEL: @neg_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <32 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_int16_t neg_i16(fixed_int16_t a) { return ~a; }

// CHECK-LABEL: @neg_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <16 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_int32_t neg_i32(fixed_int32_t a) { return ~a; }

// CHECK-LABEL: @neg_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <8 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_int64_t neg_i64(fixed_int64_t a) { return ~a; }

// CHECK-LABEL: @neg_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <64 x i8> @llvm.vector.extract.v64i8.nxv16i8(<vscale x 16 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <64 x i8> [[A]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 16 x i8> @llvm.vector.insert.nxv16i8.v64i8(<vscale x 16 x i8> undef, <64 x i8> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 16 x i8> [[CASTSCALABLESVE]]
//
fixed_uint8_t neg_u8(fixed_uint8_t a) { return ~a; }

// CHECK-LABEL: @neg_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i16> @llvm.vector.extract.v32i16.nxv8i16(<vscale x 8 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <32 x i16> [[A]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v32i16(<vscale x 8 x i16> undef, <32 x i16> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i16> [[CASTSCALABLESVE]]
//
fixed_uint16_t neg_u16(fixed_uint16_t a) { return ~a; }

// CHECK-LABEL: @neg_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i32> @llvm.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <16 x i32> [[A]], <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
//
fixed_uint32_t neg_u32(fixed_uint32_t a) { return ~a; }

// CHECK-LABEL: @neg_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i64> @llvm.vector.extract.v8i64.nxv2i64(<vscale x 2 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[NEG:%.*]] = xor <8 x i64> [[A]], <i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1, i64 -1>
// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x i64> @llvm.vector.insert.nxv2i64.v8i64(<vscale x 2 x i64> undef, <8 x i64> [[NEG]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i64> [[CASTSCALABLESVE]]
//
fixed_uint64_t neg_u64(fixed_uint64_t a) { return ~a; }