; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc < %s -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc < %s -global-isel=1 -verify-machineinstrs -mtriple=arm64-none-linux-gnu -mattr=+neon | FileCheck %s --check-prefixes=CHECK,CHECK-GI
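
; Across-vector reduction intrinsics: saddlv/uaddlv, addv (via saddv), smaxv/umaxv,
; sminv/uminv, and the floating-point fmaxv/fminv/fmaxnmv/fminnmv, checked for both
; SelectionDAG (CHECK-SD) and GlobalISel (CHECK-GI) lowering.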

declare float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float>)
declare float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float>)
declare float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float>)
declare float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float>)
declare i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8>)
declare i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8>)
declare i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16>)
declare i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8>)

define i16 @test_vaddlv_s8(<8 x i8> %a) {
; CHECK-SD-LABEL: test_vaddlv_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    saddlv h0, v0.8b
; CHECK-SD-NEXT:    smov w0, v0.h[0]
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddlv_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    saddlv h0, v0.8b
; CHECK-GI-NEXT:    fmov w0, s0
; CHECK-GI-NEXT:    ret
entry:
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %saddlvv.i to i16
  ret i16 %0
}

define i32 @test_vaddlv_s16(<4 x i16> %a) {
; CHECK-LABEL: test_vaddlv_s16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    saddlv s0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v4i16(<4 x i16> %a)
  ret i32 %saddlvv.i
}

define i16 @test_vaddlv_u8(<8 x i8> %a) {
; CHECK-LABEL: test_vaddlv_u8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv h0, v0.8b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %uaddlvv.i to i16
  ret i16 %0
}

define i32 @test_vaddlv_u16(<4 x i16> %a) {
; CHECK-LABEL: test_vaddlv_u16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv s0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v4i16(<4 x i16> %a)
  ret i32 %uaddlvv.i
}

define i16 @test_vaddlvq_s8(<16 x i8> %a) {
; CHECK-SD-LABEL: test_vaddlvq_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    saddlv h0, v0.16b
; CHECK-SD-NEXT:    smov w0, v0.h[0]
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddlvq_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    saddlv h0, v0.16b
; CHECK-GI-NEXT:    fmov w0, s0
; CHECK-GI-NEXT:    ret
entry:
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %saddlvv.i to i16
  ret i16 %0
}

define i32 @test_vaddlvq_s16(<8 x i16> %a) {
; CHECK-LABEL: test_vaddlvq_s16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    saddlv s0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %saddlvv.i = tail call i32 @llvm.aarch64.neon.saddlv.i32.v8i16(<8 x i16> %a)
  ret i32 %saddlvv.i
}

define i64 @test_vaddlvq_s32(<4 x i32> %a) {
; CHECK-LABEL: test_vaddlvq_s32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    saddlv d0, v0.4s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
entry:
  %saddlvv.i = tail call i64 @llvm.aarch64.neon.saddlv.i64.v4i32(<4 x i32> %a)
  ret i64 %saddlvv.i
}

define i16 @test_vaddlvq_u8(<16 x i8> %a) {
; CHECK-LABEL: test_vaddlvq_u8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv h0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %uaddlvv.i to i16
  ret i16 %0
}

define i32 @test_vaddlvq_u16(<8 x i16> %a) {
; CHECK-LABEL: test_vaddlvq_u16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv s0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uaddlvv.i = tail call i32 @llvm.aarch64.neon.uaddlv.i32.v8i16(<8 x i16> %a)
  ret i32 %uaddlvv.i
}

define i64 @test_vaddlvq_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vaddlvq_u32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uaddlv d0, v0.4s
; CHECK-NEXT:    fmov x0, d0
; CHECK-NEXT:    ret
entry:
  %uaddlvv.i = tail call i64 @llvm.aarch64.neon.uaddlv.i64.v4i32(<4 x i32> %a)
  ret i64 %uaddlvv.i
}

define i8 @test_vmaxv_s8(<8 x i8> %a) {
; CHECK-SD-LABEL: test_vmaxv_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    smaxv b0, v0.8b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vmaxv_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    smaxv b0, v0.8b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %smaxv.i to i8
  ret i8 %0
}

define i16 @test_vmaxv_s16(<4 x i16> %a) {
; CHECK-SD-LABEL: test_vmaxv_s16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    smaxv h0, v0.4h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vmaxv_s16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    smaxv h0, v0.4h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i16(<4 x i16> %a)
  %0 = trunc i32 %smaxv.i to i16
  ret i16 %0
}

define i8 @test_vmaxv_u8(<8 x i8> %a) {
; CHECK-LABEL: test_vmaxv_u8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umaxv b0, v0.8b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %umaxv.i to i8
  ret i8 %0
}

define i16 @test_vmaxv_u16(<4 x i16> %a) {
; CHECK-LABEL: test_vmaxv_u16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umaxv h0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i16(<4 x i16> %a)
  %0 = trunc i32 %umaxv.i to i16
  ret i16 %0
}

define i8 @test_vmaxvq_s8(<16 x i8> %a) {
; CHECK-SD-LABEL: test_vmaxvq_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    smaxv b0, v0.16b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vmaxvq_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    smaxv b0, v0.16b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %smaxv.i to i8
  ret i8 %0
}

define i16 @test_vmaxvq_s16(<8 x i16> %a) {
; CHECK-SD-LABEL: test_vmaxvq_s16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    smaxv h0, v0.8h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vmaxvq_s16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    smaxv h0, v0.8h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v8i16(<8 x i16> %a)
  %0 = trunc i32 %smaxv.i to i16
  ret i16 %0
}

define i32 @test_vmaxvq_s32(<4 x i32> %a) {
; CHECK-LABEL: test_vmaxvq_s32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    smaxv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %smaxv.i = tail call i32 @llvm.aarch64.neon.smaxv.i32.v4i32(<4 x i32> %a)
  ret i32 %smaxv.i
}

define i8 @test_vmaxvq_u8(<16 x i8> %a) {
; CHECK-LABEL: test_vmaxvq_u8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umaxv b0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %umaxv.i to i8
  ret i8 %0
}

define i16 @test_vmaxvq_u16(<8 x i16> %a) {
; CHECK-LABEL: test_vmaxvq_u16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umaxv h0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v8i16(<8 x i16> %a)
  %0 = trunc i32 %umaxv.i to i16
  ret i16 %0
}

define i32 @test_vmaxvq_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vmaxvq_u32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    umaxv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %umaxv.i = tail call i32 @llvm.aarch64.neon.umaxv.i32.v4i32(<4 x i32> %a)
  ret i32 %umaxv.i
}

define i8 @test_vminv_s8(<8 x i8> %a) {
; CHECK-SD-LABEL: test_vminv_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    sminv b0, v0.8b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vminv_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    sminv b0, v0.8b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %sminv.i to i8
  ret i8 %0
}

define i16 @test_vminv_s16(<4 x i16> %a) {
; CHECK-SD-LABEL: test_vminv_s16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    sminv h0, v0.4h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vminv_s16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    sminv h0, v0.4h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i16(<4 x i16> %a)
  %0 = trunc i32 %sminv.i to i16
  ret i16 %0
}

define i8 @test_vminv_u8(<8 x i8> %a) {
; CHECK-LABEL: test_vminv_u8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uminv b0, v0.8b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %uminv.i to i8
  ret i8 %0
}

define i16 @test_vminv_u16(<4 x i16> %a) {
; CHECK-LABEL: test_vminv_u16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uminv h0, v0.4h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i16(<4 x i16> %a)
  %0 = trunc i32 %uminv.i to i16
  ret i16 %0
}

define i8 @test_vminvq_s8(<16 x i8> %a) {
; CHECK-SD-LABEL: test_vminvq_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    sminv b0, v0.16b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vminvq_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    sminv b0, v0.16b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %sminv.i to i8
  ret i8 %0
}

define i16 @test_vminvq_s16(<8 x i16> %a) {
; CHECK-SD-LABEL: test_vminvq_s16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    sminv h0, v0.8h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vminvq_s16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    sminv h0, v0.8h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v8i16(<8 x i16> %a)
  %0 = trunc i32 %sminv.i to i16
  ret i16 %0
}

define i32 @test_vminvq_s32(<4 x i32> %a) {
; CHECK-LABEL: test_vminvq_s32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    sminv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %sminv.i = tail call i32 @llvm.aarch64.neon.sminv.i32.v4i32(<4 x i32> %a)
  ret i32 %sminv.i
}

define i8 @test_vminvq_u8(<16 x i8> %a) {
; CHECK-LABEL: test_vminvq_u8:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uminv b0, v0.16b
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %uminv.i to i8
  ret i8 %0
}

define i16 @test_vminvq_u16(<8 x i16> %a) {
; CHECK-LABEL: test_vminvq_u16:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uminv h0, v0.8h
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v8i16(<8 x i16> %a)
  %0 = trunc i32 %uminv.i to i16
  ret i16 %0
}

define i32 @test_vminvq_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vminvq_u32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uminv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %uminv.i = tail call i32 @llvm.aarch64.neon.uminv.i32.v4i32(<4 x i32> %a)
  ret i32 %uminv.i
}

define i8 @test_vaddv_s8(<8 x i8> %a) {
; CHECK-SD-LABEL: test_vaddv_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv b0, v0.8b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddv_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv b0, v0.8b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define i16 @test_vaddv_s16(<4 x i16> %a) {
; CHECK-SD-LABEL: test_vaddv_s16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv h0, v0.4h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddv_s16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv h0, v0.4h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i8 @test_vaddv_u8(<8 x i8> %a) {
; CHECK-SD-LABEL: test_vaddv_u8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv b0, v0.8b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddv_u8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv b0, v0.8b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i8(<8 x i8> %a)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define i16 @test_vaddv_u16(<4 x i16> %a) {
; CHECK-SD-LABEL: test_vaddv_u16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv h0, v0.4h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddv_u16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv h0, v0.4h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i16(<4 x i16> %a)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i8 @test_vaddvq_s8(<16 x i8> %a) {
; CHECK-SD-LABEL: test_vaddvq_s8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv b0, v0.16b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddvq_s8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv b0, v0.16b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define i16 @test_vaddvq_s16(<8 x i16> %a) {
; CHECK-SD-LABEL: test_vaddvq_s16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv h0, v0.8h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddvq_s16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv h0, v0.8h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i32 @test_vaddvq_s32(<4 x i32> %a) {
; CHECK-LABEL: test_vaddvq_s32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a)
  ret i32 %vaddv.i
}

define i8 @test_vaddvq_u8(<16 x i8> %a) {
; CHECK-SD-LABEL: test_vaddvq_u8:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv b0, v0.16b
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddvq_u8:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv b0, v0.16b
; CHECK-GI-NEXT:    smov w0, v0.b[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v16i8(<16 x i8> %a)
  %0 = trunc i32 %vaddv.i to i8
  ret i8 %0
}

define i16 @test_vaddvq_u16(<8 x i16> %a) {
; CHECK-SD-LABEL: test_vaddvq_u16:
; CHECK-SD:       // %bb.0: // %entry
; CHECK-SD-NEXT:    addv h0, v0.8h
; CHECK-SD-NEXT:    fmov w0, s0
; CHECK-SD-NEXT:    ret
;
; CHECK-GI-LABEL: test_vaddvq_u16:
; CHECK-GI:       // %bb.0: // %entry
; CHECK-GI-NEXT:    addv h0, v0.8h
; CHECK-GI-NEXT:    smov w0, v0.h[0]
; CHECK-GI-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v8i16(<8 x i16> %a)
  %0 = trunc i32 %vaddv.i to i16
  ret i16 %0
}

define i32 @test_vaddvq_u32(<4 x i32> %a) {
; CHECK-LABEL: test_vaddvq_u32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    addv s0, v0.4s
; CHECK-NEXT:    fmov w0, s0
; CHECK-NEXT:    ret
entry:
  %vaddv.i = tail call i32 @llvm.aarch64.neon.saddv.i32.v4i32(<4 x i32> %a)
  ret i32 %vaddv.i
}

define float @test_vmaxvq_f32(<4 x float> %a) {
; CHECK-LABEL: test_vmaxvq_f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmaxv s0, v0.4s
; CHECK-NEXT:    ret
entry:
  %0 = call float @llvm.aarch64.neon.fmaxv.f32.v4f32(<4 x float> %a)
  ret float %0
}

define float @test_vminvq_f32(<4 x float> %a) {
; CHECK-LABEL: test_vminvq_f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fminv s0, v0.4s
; CHECK-NEXT:    ret
entry:
  %0 = call float @llvm.aarch64.neon.fminv.f32.v4f32(<4 x float> %a)
  ret float %0
}

define float @test_vmaxnmvq_f32(<4 x float> %a) {
; CHECK-LABEL: test_vmaxnmvq_f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fmaxnmv s0, v0.4s
; CHECK-NEXT:    ret
entry:
  %0 = call float @llvm.aarch64.neon.fmaxnmv.f32.v4f32(<4 x float> %a)
  ret float %0
}

define float @test_vminnmvq_f32(<4 x float> %a) {
; CHECK-LABEL: test_vminnmvq_f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fminnmv s0, v0.4s
; CHECK-NEXT:    ret
entry:
  %0 = call float @llvm.aarch64.neon.fminnmv.f32.v4f32(<4 x float> %a)
  ret float %0
}