; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -passes=instcombine < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

define dso_local <vscale x 4 x float> @dupq_f32_ab_pattern(float %x, float %y) {
; CHECK-LABEL: @dupq_f32_ab_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <4 x float> poison, float [[X:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <4 x float> [[TMP1]], float [[Y:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 4 x float> [[TMP3]] to <vscale x 2 x double>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <vscale x 2 x double> [[TMP4]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 2 x double> [[TMP5]] to <vscale x 4 x float>
; CHECK-NEXT:    ret <vscale x 4 x float> [[TMP6]]
;
  %1 = insertelement <4 x float> poison, float %x, i64 0
  %2 = insertelement <4 x float> %1, float %y, i64 1
  %3 = insertelement <4 x float> %2, float %x, i64 2
  %4 = insertelement <4 x float> %3, float %y, i64 3
  %5 = tail call <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> poison, <4 x float> %4, i64 0)
  %6 = tail call <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float> %5, i64 0)
  ret <vscale x 4 x float> %6
}

define dso_local <vscale x 8 x half> @dupq_f16_a_pattern(half %a) {
; CHECK-LABEL: @dupq_f16_a_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = shufflevector <8 x half> [[TMP1]], <8 x half> poison, <8 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %a, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 2
  %4 = insertelement <8 x half> %3, half %a, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %a, i64 5
  %7 = insertelement <8 x half> %6, half %a, i64 6
  %8 = insertelement <8 x half> %7, half %a, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

define dso_local <vscale x 8 x half> @dupq_f16_ab_pattern(half %a, half %b) {
; CHECK-LABEL: @dupq_f16_ab_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 8 x half> [[TMP3]] to <vscale x 4 x float>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 4 x float> [[TMP5]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP6]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 2
  %4 = insertelement <8 x half> %3, half %b, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %a, i64 6
  %8 = insertelement <8 x half> %7, half %b, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

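; The a,b,c,d quartet repeats twice per 128-bit quadword, so the dupq below
; folds to a 64-bit element splat: bitcast to <vscale x 2 x double>,
; shufflevector with a zeroinitializer mask, bitcast back.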
define dso_local <vscale x 8 x half> @dupq_f16_abcd_pattern(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @dupq_f16_abcd_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[D:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP4]], i64 0)
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 8 x half> [[TMP5]] to <vscale x 2 x double>
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <vscale x 2 x double> [[TMP6]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <vscale x 2 x double> [[TMP7]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP8]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %c, i64 2
  %4 = insertelement <8 x half> %3, half %d, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %c, i64 6
  %8 = insertelement <8 x half> %7, half %d, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

define dso_local <vscale x 8 x half> @dupq_f16_abcnull_pattern(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @dupq_f16_abcnull_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    [[TMP5:%.*]] = bitcast <vscale x 8 x half> [[TMP4]] to <vscale x 2 x double>
; CHECK-NEXT:    [[TMP6:%.*]] = shufflevector <vscale x 2 x double> [[TMP5]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP7:%.*]] = bitcast <vscale x 2 x double> [[TMP6]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP7]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %c, i64 2
  %4 = insertelement <8 x half> %3, half %a, i64 4
  %5 = insertelement <8 x half> %4, half %b, i64 5
  %6 = insertelement <8 x half> %5, half %c, i64 6
  %7 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %6, i64 0)
  %8 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %7, i64 0)
  ret <vscale x 8 x half> %8
}

define dso_local <vscale x 8 x half> @dupq_f16_abnull_pattern(half %a, half %b) {
; CHECK-LABEL: @dupq_f16_abnull_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <vscale x 8 x half> [[TMP3]] to <vscale x 4 x float>
; CHECK-NEXT:    [[TMP5:%.*]] = shufflevector <vscale x 4 x float> [[TMP4]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 4 x float> [[TMP5]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP6]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %2, i64 0)
  %4 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %3, i64 0)
  ret <vscale x 8 x half> %4
}

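; Negative tests: the insertion base is not poison (a fixed-length %v below, a
; scalable %v in the following test), so the unwritten lanes are unknown and
; the dupq is kept.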
define dso_local <vscale x 8 x half> @neg_dupq_f16_non_poison_fixed(half %a, half %b, <8 x half> %v) {
; CHECK-LABEL: @neg_dupq_f16_non_poison_fixed(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> [[V:%.*]], half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
;
  %1 = insertelement <8 x half> %v, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 0
  %4 = insertelement <8 x half> %3, half %b, i64 1
  %5 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %4, i64 0)
  %6 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %5, i64 0)
  ret <vscale x 8 x half> %6
}

define dso_local <vscale x 8 x half> @neg_dupq_f16_into_non_poison_scalable(half %a, half %b, <vscale x 8 x half> %v) {
; CHECK-LABEL: @neg_dupq_f16_into_non_poison_scalable(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> [[V:%.*]], <8 x half> [[TMP2]], i64 0)
; CHECK-NEXT:    [[TMP4:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP3]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP4]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 0
  %4 = insertelement <8 x half> %3, half %b, i64 1
  %5 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> %v, <8 x half> %4, i64 0)
  %6 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %5, i64 0)
  ret <vscale x 8 x half> %6
}

; Insert %c to override the last element in the insertelement chain, which will fail to combine
define dso_local <vscale x 8 x half> @neg_dupq_f16_abcd_pattern_double_insert(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @neg_dupq_f16_abcd_pattern_double_insert(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[D:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[A]], i64 4
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[B]], i64 5
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[C]], i64 6
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x half> [[TMP7]], half [[C]], i64 7
; CHECK-NEXT:    [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP8]], i64 0)
; CHECK-NEXT:    [[TMP10:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP9]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP10]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %b, i64 1
  %3 = insertelement <8 x half> %2, half %c, i64 2
  %4 = insertelement <8 x half> %3, half %d, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %c, i64 6
  %8 = insertelement <8 x half> %7, half %d, i64 7
  %9 = insertelement <8 x half> %8, half %c, i64 7
  %10 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %9, i64 0)
  %11 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %10, i64 0)
  ret <vscale x 8 x half> %11
}

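; Same a,b,c,d pattern, but the insertelement chain is built from lane 7 down
; to lane 0; the combine should still recognise the repeating pattern.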
define dso_local <vscale x 8 x half> @dupq_f16_abcd_pattern_reverted_insert(half %a, half %b, half %c, half %d) {
; CHECK-LABEL: @dupq_f16_abcd_pattern_reverted_insert(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[B:%.*]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[C:%.*]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[D:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP4]], i64 0)
; CHECK-NEXT:    [[TMP6:%.*]] = bitcast <vscale x 8 x half> [[TMP5]] to <vscale x 2 x double>
; CHECK-NEXT:    [[TMP7:%.*]] = shufflevector <vscale x 2 x double> [[TMP6]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
; CHECK-NEXT:    [[TMP8:%.*]] = bitcast <vscale x 2 x double> [[TMP7]] to <vscale x 8 x half>
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP8]]
;
  %1 = insertelement <8 x half> poison, half %d, i64 7
  %2 = insertelement <8 x half> %1, half %c, i64 6
  %3 = insertelement <8 x half> %2, half %b, i64 5
  %4 = insertelement <8 x half> %3, half %a, i64 4
  %5 = insertelement <8 x half> %4, half %d, i64 3
  %6 = insertelement <8 x half> %5, half %c, i64 2
  %7 = insertelement <8 x half> %6, half %b, i64 1
  %8 = insertelement <8 x half> %7, half %a, i64 0
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

define dso_local <vscale x 8 x half> @dupq_f16_ab_no_front_pattern(half %a, half %b) {
; CHECK-LABEL: @dupq_f16_ab_no_front_pattern(
; CHECK-NEXT:    [[TMP1:%.*]] = insertelement <8 x half> poison, half [[A:%.*]], i64 0
; CHECK-NEXT:    [[TMP2:%.*]] = insertelement <8 x half> [[TMP1]], half [[A]], i64 1
; CHECK-NEXT:    [[TMP3:%.*]] = insertelement <8 x half> [[TMP2]], half [[A]], i64 2
; CHECK-NEXT:    [[TMP4:%.*]] = insertelement <8 x half> [[TMP3]], half [[B:%.*]], i64 3
; CHECK-NEXT:    [[TMP5:%.*]] = insertelement <8 x half> [[TMP4]], half [[A]], i64 4
; CHECK-NEXT:    [[TMP6:%.*]] = insertelement <8 x half> [[TMP5]], half [[B]], i64 5
; CHECK-NEXT:    [[TMP7:%.*]] = insertelement <8 x half> [[TMP6]], half [[A]], i64 6
; CHECK-NEXT:    [[TMP8:%.*]] = insertelement <8 x half> [[TMP7]], half [[B]], i64 7
; CHECK-NEXT:    [[TMP9:%.*]] = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> [[TMP8]], i64 0)
; CHECK-NEXT:    [[TMP10:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> [[TMP9]], i64 0)
; CHECK-NEXT:    ret <vscale x 8 x half> [[TMP10]]
;
  %1 = insertelement <8 x half> poison, half %a, i64 0
  %2 = insertelement <8 x half> %1, half %a, i64 1
  %3 = insertelement <8 x half> %2, half %a, i64 2
  %4 = insertelement <8 x half> %3, half %b, i64 3
  %5 = insertelement <8 x half> %4, half %a, i64 4
  %6 = insertelement <8 x half> %5, half %b, i64 5
  %7 = insertelement <8 x half> %6, half %a, i64 6
  %8 = insertelement <8 x half> %7, half %b, i64 7
  %9 = tail call <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half> poison, <8 x half> %8, i64 0)
  %10 = tail call <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half> %9, i64 0)
  ret <vscale x 8 x half> %10
}

declare <vscale x 8 x half> @llvm.vector.insert.nxv8f16.v8f16(<vscale x 8 x half>, <8 x half>, i64)
declare <vscale x 8 x half> @llvm.aarch64.sve.dupq.lane.nxv8f16(<vscale x 8 x half>, i64)
declare <vscale x 4 x float> @llvm.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)
declare <vscale x 2 x float> @llvm.vector.insert.nxv2f32.v2f32(<vscale x 2 x float>, <2 x float>, i64)
declare <vscale x 4 x float> @llvm.aarch64.sve.dupq.lane.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(<vscale x 4 x i32>, <4 x i32>, i64)
declare <vscale x 4 x i32> @llvm.aarch64.sve.dupq.lane.nxv4i32(<vscale x 4 x i32>, i64)
declare <vscale x 8 x i16> @llvm.vector.insert.nxv8i16.v8i16(<vscale x 8 x i16>, <8 x i16>, i64)
declare <vscale x 8 x i16> @llvm.aarch64.sve.dupq.lane.nxv8i16(<vscale x 8 x i16>, i64)

attributes #0 = { "target-features"="+sve" }