// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple riscv64-none-linux-gnu -target-feature +zve64d \
// RUN:   -target-feature +f -target-feature +d -disable-O0-optnone \
// RUN:   -mvscale-min=4 -mvscale-max=4 -emit-llvm -o - %s | \
// RUN:   opt -S -passes=sroa | FileCheck %s

// REQUIRES: riscv-registered-target

#include <stdint.h>

typedef __rvv_int8m1_t vint8m1_t;
typedef __rvv_uint8m1_t vuint8m1_t;
typedef __rvv_int16m1_t vint16m1_t;
typedef __rvv_uint16m1_t vuint16m1_t;
typedef __rvv_int32m1_t vint32m1_t;
typedef __rvv_uint32m1_t vuint32m1_t;
typedef __rvv_int64m1_t vint64m1_t;
typedef __rvv_uint64m1_t vuint64m1_t;
typedef __rvv_float32m1_t vfloat32m1_t;
typedef __rvv_float64m1_t vfloat64m1_t;

typedef vint8m1_t fixed_int8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint16m1_t fixed_int16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint32m1_t fixed_int32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vint64m1_t fixed_int64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint8m1_t fixed_uint8m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint16m1_t fixed_uint16m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint32m1_t fixed_uint32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vuint64m1_t fixed_uint64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat32m1_t fixed_float32m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));
typedef vfloat64m1_t fixed_float64m1_t __attribute__((riscv_rvv_vector_bits(__riscv_v_fixed_vlen)));

// CHECK-LABEL: @lshift_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t lshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_i8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = ashr <32 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t rshift_i8(fixed_int8m1_t a, fixed_int8m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t lshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = lshr <32 x i8> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t rshift_u8(fixed_uint8m1_t a, fixed_uint8m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t lshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_i16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = ashr <16 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t rshift_i16(fixed_int16m1_t a, fixed_int16m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t lshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = lshr <16 x i16> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t rshift_u16(fixed_uint16m1_t a, fixed_uint16m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t lshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_i32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = ashr <8 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t rshift_i32(fixed_int32m1_t a, fixed_int32m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t lshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i32> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t rshift_u32(fixed_uint32m1_t a, fixed_uint32m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t lshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_i64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t rshift_i64(fixed_int64m1_t a, fixed_int64m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t lshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) { return a << b; }

// CHECK-LABEL: @rshift_u64(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[B:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[B_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i64> [[A]], [[B]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t rshift_u64(fixed_uint64m1_t a, fixed_uint64m1_t b) { return a >> b; }

// CHECK-LABEL: @lshift_i8_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t lshift_i8_rsplat(fixed_int8m1_t a, int8_t b) { return a << b; }

// CHECK-LABEL: @lshift_i8_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t lshift_i8_lsplat(fixed_int8m1_t a, int8_t b) { return b << a; }

// CHECK-LABEL: @rshift_i8_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = sext i8 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT:    [[SHR:%.*]] = ashr <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t rshift_i8_rsplat(fixed_int8m1_t a, int8_t b) { return a >> b; }

// CHECK-LABEL: @rshift_i8_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = ashr <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_int8m1_t rshift_i8_lsplat(fixed_int8m1_t a, int8_t b) { return b >> a; }

// CHECK-LABEL: @lshift_u8_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t lshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) { return a << b; }

// CHECK-LABEL: @lshift_u8_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t lshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) { return b << a; }

// CHECK-LABEL: @rshift_u8_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = zext i8 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i32> [[SPLAT_SPLATINSERT]], <32 x i32> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <32 x i32> [[SPLAT_SPLAT]] to <32 x i8>
// CHECK-NEXT:    [[SHR:%.*]] = lshr <32 x i8> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t rshift_u8_rsplat(fixed_uint8m1_t a, uint8_t b) { return a >> b; }

// CHECK-LABEL: @rshift_u8_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <32 x i8> @llvm.vector.extract.v32i8.nxv8i8(<vscale x 8 x i8> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <32 x i8> poison, i8 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <32 x i8> [[SPLAT_SPLATINSERT]], <32 x i8> poison, <32 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = lshr <32 x i8> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 8 x i8> @llvm.vector.insert.nxv8i8.v32i8(<vscale x 8 x i8> undef, <32 x i8> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 8 x i8> [[CAST_SCALABLE]]
//
fixed_uint8m1_t rshift_u8_lsplat(fixed_uint8m1_t a, uint8_t b) { return b >> a; }

// CHECK-LABEL: @lshift_i16_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t lshift_i16_rsplat(fixed_int16m1_t a, int16_t b) { return a << b; }

// CHECK-LABEL: @lshift_i16_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t lshift_i16_lsplat(fixed_int16m1_t a, int16_t b) { return b << a; }

// CHECK-LABEL: @rshift_i16_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = sext i16 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT:    [[SHR:%.*]] = ashr <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t rshift_i16_rsplat(fixed_int16m1_t a, int16_t b) { return a >> b; }

// CHECK-LABEL: @rshift_i16_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = ashr <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_int16m1_t rshift_i16_lsplat(fixed_int16m1_t a, int16_t b) { return b >> a; }

// CHECK-LABEL: @lshift_u16_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t lshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) { return a << b; }

// CHECK-LABEL: @lshift_u16_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t lshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) { return b << a; }

// CHECK-LABEL: @rshift_u16_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[CONV:%.*]] = zext i16 [[B:%.*]] to i32
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i32> poison, i32 [[CONV]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i32> [[SPLAT_SPLATINSERT]], <16 x i32> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SH_PROM:%.*]] = trunc <16 x i32> [[SPLAT_SPLAT]] to <16 x i16>
// CHECK-NEXT:    [[SHR:%.*]] = lshr <16 x i16> [[A]], [[SH_PROM]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t rshift_u16_rsplat(fixed_uint16m1_t a, uint16_t b) { return a >> b; }

// CHECK-LABEL: @rshift_u16_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <16 x i16> @llvm.vector.extract.v16i16.nxv4i16(<vscale x 4 x i16> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <16 x i16> poison, i16 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <16 x i16> [[SPLAT_SPLATINSERT]], <16 x i16> poison, <16 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = lshr <16 x i16> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 4 x i16> @llvm.vector.insert.nxv4i16.v16i16(<vscale x 4 x i16> undef, <16 x i16> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 4 x i16> [[CAST_SCALABLE]]
//
fixed_uint16m1_t rshift_u16_lsplat(fixed_uint16m1_t a, uint16_t b) { return b >> a; }

// CHECK-LABEL: @lshift_i32_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t lshift_i32_rsplat(fixed_int32m1_t a, int32_t b) { return a << b; }

// CHECK-LABEL: @lshift_i32_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t lshift_i32_lsplat(fixed_int32m1_t a, int32_t b) { return b << a; }

// CHECK-LABEL: @rshift_i32_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = ashr <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t rshift_i32_rsplat(fixed_int32m1_t a, int32_t b) { return a >> b; }

// CHECK-LABEL: @rshift_i32_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = ashr <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_int32m1_t rshift_i32_lsplat(fixed_int32m1_t a, int32_t b) { return b >> a; }

// CHECK-LABEL: @lshift_u32_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t lshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) { return a << b; }

// CHECK-LABEL: @lshift_u32_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t lshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) { return b << a; }

// CHECK-LABEL: @rshift_u32_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i32> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t rshift_u32_rsplat(fixed_uint32m1_t a, uint32_t b) { return a >> b; }

// CHECK-LABEL: @rshift_u32_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <8 x i32> @llvm.vector.extract.v8i32.nxv2i32(<vscale x 2 x i32> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <8 x i32> poison, i32 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <8 x i32> [[SPLAT_SPLATINSERT]], <8 x i32> poison, <8 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = lshr <8 x i32> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 2 x i32> @llvm.vector.insert.nxv2i32.v8i32(<vscale x 2 x i32> undef, <8 x i32> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 2 x i32> [[CAST_SCALABLE]]
//
fixed_uint32m1_t rshift_u32_lsplat(fixed_uint32m1_t a, uint32_t b) { return b >> a; }

// CHECK-LABEL: @lshift_i64_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t lshift_i64_rsplat(fixed_int64m1_t a, int64_t b) { return a << b; }

// CHECK-LABEL: @lshift_i64_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t lshift_i64_lsplat(fixed_int64m1_t a, int64_t b) { return b << a; }

// CHECK-LABEL: @rshift_i64_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t rshift_i64_rsplat(fixed_int64m1_t a, int64_t b) { return a >> b; }

// CHECK-LABEL: @rshift_i64_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = ashr <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_int64m1_t rshift_i64_lsplat(fixed_int64m1_t a, int64_t b) { return b >> a; }

// CHECK-LABEL: @lshift_u64_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t lshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) { return a << b; }

// CHECK-LABEL: @lshift_u64_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHL:%.*]] = shl <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHL]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t lshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) { return b << a; }

// CHECK-LABEL: @rshift_u64_rsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i64> [[A]], [[SPLAT_SPLAT]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t rshift_u64_rsplat(fixed_uint64m1_t a, uint64_t b) { return a >> b; }

// CHECK-LABEL: @rshift_u64_lsplat(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[A:%.*]] = call <4 x i64> @llvm.vector.extract.v4i64.nxv1i64(<vscale x 1 x i64> [[A_COERCE:%.*]], i64 0)
// CHECK-NEXT:    [[SPLAT_SPLATINSERT:%.*]] = insertelement <4 x i64> poison, i64 [[B:%.*]], i64 0
// CHECK-NEXT:    [[SPLAT_SPLAT:%.*]] = shufflevector <4 x i64> [[SPLAT_SPLATINSERT]], <4 x i64> poison, <4 x i32> zeroinitializer
// CHECK-NEXT:    [[SHR:%.*]] = lshr <4 x i64> [[SPLAT_SPLAT]], [[A]]
// CHECK-NEXT:    [[CAST_SCALABLE:%.*]] = call <vscale x 1 x i64> @llvm.vector.insert.nxv1i64.v4i64(<vscale x 1 x i64> undef, <4 x i64> [[SHR]], i64 0)
// CHECK-NEXT:    ret <vscale x 1 x i64> [[CAST_SCALABLE]]
//
fixed_uint64m1_t rshift_u64_lsplat(fixed_uint64m1_t a, uint64_t b) { return b >> a; }