; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 16 x i8> @andv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: andv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    andv b0, p0, z0.b
; CHECK-NEXT:    ret
  %t1 = call i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
  ret <vscale x 16 x i8> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 8 x i16> @eorv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: eorv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    eorv h0, p0, z0.h
; CHECK-NEXT:    ret
  %t1 = call i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  %t2 = insertelement <vscale x 8 x i16> zeroinitializer, i16 %t1, i64 0
  ret <vscale x 8 x i16> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x double> @fadda_zero_fill(<vscale x 2 x i1> %pg, double %init, <vscale x 2 x double> %a) #0 {
; CHECK-LABEL: fadda_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $z0
; CHECK-NEXT:    fadda d0, p0, d0, z1.d
; CHECK-NEXT:    ret
  %t1 = call double @llvm.aarch64.sve.fadda.nxv2f64(<vscale x 2 x i1> %pg, double %init, <vscale x 2 x double> %a)
  %t2 = insertelement <vscale x 2 x double> zeroinitializer, double %t1, i64 0
  ret <vscale x 2 x double> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 4 x float> @faddv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a) #0 {
; CHECK-LABEL: faddv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    faddv s0, p0, z0.s
; CHECK-NEXT:    ret
  %t1 = call float @llvm.aarch64.sve.faddv.nxv4f32(<vscale x 4 x i1> %pg, <vscale x 4 x float> %a)
  %t2 = insertelement <vscale x 4 x float> zeroinitializer, float %t1, i64 0
  ret <vscale x 4 x float> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 8 x half> @fmaxv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a) #0 {
; CHECK-LABEL: fmaxv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxv h0, p0, z0.h
; CHECK-NEXT:    ret
  %t1 = call half @llvm.aarch64.sve.fmaxv.nxv8f16(<vscale x 8 x i1> %pg, <vscale x 8 x half> %a)
  %t2 = insertelement <vscale x 8 x half> zeroinitializer, half %t1, i64 0
  ret <vscale x 8 x half> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x float> @fmaxnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
; CHECK-LABEL: fmaxnmv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnmv s0, p0, z0.s
; CHECK-NEXT:    ret
  %t1 = call float @llvm.aarch64.sve.fmaxnmv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
  %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
  ret <vscale x 2 x float> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x float> @fminnmv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
; CHECK-LABEL: fminnmv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnmv s0, p0, z0.s
; CHECK-NEXT:    ret
  %t1 = call float @llvm.aarch64.sve.fminnmv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
  %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
  ret <vscale x 2 x float> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x float> @fminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a) #0 {
; CHECK-LABEL: fminv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminv s0, p0, z0.s
; CHECK-NEXT:    ret
  %t1 = call float @llvm.aarch64.sve.fminv.nxv2f32(<vscale x 2 x i1> %pg, <vscale x 2 x float> %a)
  %t2 = insertelement <vscale x 2 x float> zeroinitializer, float %t1, i64 0
  ret <vscale x 2 x float> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 4 x i32> @orv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: orv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    orv s0, p0, z0.s
; CHECK-NEXT:    ret
  %t1 = call i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  %t2 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %t1, i64 0
  ret <vscale x 4 x i32> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x i64> @saddv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: saddv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    saddv d0, p0, z0.b
; CHECK-NEXT:    ret
  %t1 = call i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
  ret <vscale x 2 x i64> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x i64> @smaxv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: smaxv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    smaxv d0, p0, z0.d
; CHECK-NEXT:    ret
  %t1 = call i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
  ret <vscale x 2 x i64> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 4 x i32> @sminv_zero_fill(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) #0 {
; CHECK-LABEL: sminv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sminv s0, p0, z0.s
; CHECK-NEXT:    ret
  %t1 = call i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  %t2 = insertelement <vscale x 4 x i32> zeroinitializer, i32 %t1, i64 0
  ret <vscale x 4 x i32> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x i64> @uaddv_zero_fill(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a) #0 {
; CHECK-LABEL: uaddv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uaddv d0, p0, z0.h
; CHECK-NEXT:    ret
  %t1 = call i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1> %pg, <vscale x 8 x i16> %a)
  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
  ret <vscale x 2 x i64> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 16 x i8> @umaxv_zero_fill(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a) #0 {
; CHECK-LABEL: umaxv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    umaxv b0, p0, z0.b
; CHECK-NEXT:    ret
  %t1 = call i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %a)
  %t2 = insertelement <vscale x 16 x i8> zeroinitializer, i8 %t1, i64 0
  ret <vscale x 16 x i8> %t2
}

; Ensure we rely on the reduction's implicit zero filling.
define <vscale x 2 x i64> @uminv_zero_fill(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: uminv_zero_fill:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv d0, p0, z0.d
; CHECK-NEXT:    ret
  %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 0
  ret <vscale x 2 x i64> %t2
}

; Ensure explicit zeroing when inserting into a lane other than 0.
; NOTE: This test doesn't care about the exact way an insert is code generated,
; so only checks the presence of one instruction from the expected chain.
define <vscale x 2 x i64> @zero_fill_non_zero_index(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: zero_fill_non_zero_index:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.d
; CHECK-NEXT:    mov w8, #1 // =0x1
; CHECK-NEXT:    index z1.d, #0, #1
; CHECK-NEXT:    uminv d3, p0, z0.d
; CHECK-NEXT:    mov z2.d, x8
; CHECK-NEXT:    mov z0.d, #0 // =0x0
; CHECK-NEXT:    fmov x8, d3
; CHECK-NEXT:    cmpeq p0.d, p1/z, z1.d, z2.d
; CHECK-NEXT:    mov z0.d, p0/m, x8
; CHECK-NEXT:    ret
  %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  %t2 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t1, i64 1
  ret <vscale x 2 x i64> %t2
}

; Ensure explicit zeroing when the result vector is larger than that produced by
; the reduction instruction.
define <vscale x 4 x i64> @zero_fill_type_mismatch(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: zero_fill_type_mismatch:
; CHECK:       // %bb.0:
; CHECK-NEXT:    uminv d0, p0, z0.d
; CHECK-NEXT:    mov z1.d, #0 // =0x0
; CHECK-NEXT:    ret
  %t1 = call i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a)
  %t2 = insertelement <vscale x 4 x i64> zeroinitializer, i64 %t1, i64 0
  ret <vscale x 4 x i64> %t2
}

; Ensure explicit zeroing when extracting an element from an operation that
; cannot guarantee lanes 1-N are zero.
; NOTE: This test doesn't care about the exact way an insert is code generated,
; so only checks the presence of one instruction from the expected chain.
define <vscale x 2 x i64> @zero_fill_no_zero_upper_lanes(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a) #0 {
; CHECK-LABEL: zero_fill_no_zero_upper_lanes:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ptrue p1.d, vl1
; CHECK-NEXT:    umin z0.d, p0/m, z0.d, z0.d
; CHECK-NEXT:    mov z1.d, #0 // =0x0
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    mov z1.d, p1/m, x8
; CHECK-NEXT:    mov z0.d, z1.d
; CHECK-NEXT:    ret
  %t1 = call <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1> %pg, <vscale x 2 x i64> %a, <vscale x 2 x i64> %a)
  %t2 = extractelement <vscale x 2 x i64> %t1, i64 0
  %t3 = insertelement <vscale x 2 x i64> zeroinitializer, i64 %t2, i64 0
  ret <vscale x 2 x i64> %t3
}

declare i8 @llvm.aarch64.sve.andv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i8 @llvm.aarch64.sve.andv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)

declare i8 @llvm.aarch64.sve.eorv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i16 @llvm.aarch64.sve.eorv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)

declare float @llvm.aarch64.sve.fadda.nxv2f32(<vscale x 2 x i1>, float, <vscale x 2 x float>)
declare double @llvm.aarch64.sve.fadda.nxv2f64(<vscale x 2 x i1>, double, <vscale x 2 x double>)

declare float @llvm.aarch64.sve.faddv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)
declare float @llvm.aarch64.sve.faddv.nxv4f32(<vscale x 4 x i1>, <vscale x 4 x float>)

declare float @llvm.aarch64.sve.fmaxnmv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)

declare half @llvm.aarch64.sve.fmaxv.nxv8f16(<vscale x 8 x i1>, <vscale x 8 x half>)
declare float @llvm.aarch64.sve.fmaxv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)

declare float @llvm.aarch64.sve.fminv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)

declare float @llvm.aarch64.sve.fminnmv.nxv2f32(<vscale x 2 x i1>, <vscale x 2 x float>)

declare i8 @llvm.aarch64.sve.orv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i32 @llvm.aarch64.sve.orv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)

declare i64 @llvm.aarch64.sve.saddv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i64 @llvm.aarch64.sve.saddv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)

declare i8 @llvm.aarch64.sve.smaxv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i64 @llvm.aarch64.sve.smaxv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)

declare i8 @llvm.aarch64.sve.sminv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i32 @llvm.aarch64.sve.sminv.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)

declare i64 @llvm.aarch64.sve.uaddv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i64 @llvm.aarch64.sve.uaddv.nxv8i16(<vscale x 8 x i1>, <vscale x 8 x i16>)

declare i8 @llvm.aarch64.sve.umaxv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i8 @llvm.aarch64.sve.umaxv.nxv16i8(<vscale x 16 x i1>, <vscale x 16 x i8>)

declare i8 @llvm.aarch64.sve.uminv.nxv2i8(<vscale x 2 x i1>, <vscale x 2 x i8>)
declare i64 @llvm.aarch64.sve.uminv.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>)

declare <vscale x 2 x i64> @llvm.aarch64.sve.umin.nxv2i64(<vscale x 2 x i1>, <vscale x 2 x i64>, <vscale x 2 x i64>)

attributes #0 = { "target-features"="+sve" }