; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -verify-machineinstrs -mtriple=powerpc64le-unknown-linux-gnu \
; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc64-aix-xcoff \
; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s
; RUN: llc -verify-machineinstrs -mtriple=powerpc-aix-xcoff \
; RUN:   -mcpu=pwr8 -ppc-asm-full-reg-names -ppc-vsr-nums-as-vr < %s | FileCheck %s

; Tests for the saturating vsum4* AltiVec intrinsics:
;  - test1-test3: an unused result with a zero accumulator lets the whole
;    call be dropped (only blr is emitted).
;  - test4-test6: an unused result with a non-zero accumulator still emits
;    the instruction (it can set the SAT bit in VSCR).
;  - test7-test9: a used result is always emitted.
;  - test10-test12: of two calls, only the one whose result is used (plus
;    any needed for side effects) survives.

define void @test1(<16 x i8> %0) {
; CHECK-LABEL: test1:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4sbs(<16 x i8> %0, <4 x i32> zeroinitializer)
  ret void
}

define void @test2(<8 x i16> %0) {
; CHECK-LABEL: test2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4shs(<8 x i16> %0, <4 x i32> zeroinitializer)
  ret void
}

define void @test3(<16 x i8> %0) {
; CHECK-LABEL: test3:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4ubs(<16 x i8> %0, <4 x i32> zeroinitializer)
  ret void
}

define void @test4(<16 x i8> %0) {
; CHECK-LABEL: test4:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vspltisw v3, 1
; CHECK-NEXT:    vsum4sbs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4sbs(<16 x i8> %0, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
  ret void
}

define void @test5(<8 x i16> %0) {
; CHECK-LABEL: test5:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vspltisw v3, 1
; CHECK-NEXT:    vsum4shs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4shs(<8 x i16> %0, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
  ret void
}

define void @test6(<16 x i8> %0) {
; CHECK-LABEL: test6:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vspltisw v3, 1
; CHECK-NEXT:    vsum4ubs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4ubs(<16 x i8> %0, <4 x i32> <i32 1, i32 1, i32 1, i32 1>)
  ret void
}

define <4 x i32> @test7(<16 x i8> %0) {
; CHECK-LABEL: test7:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xxlxor v3, v3, v3
; CHECK-NEXT:    vsum4sbs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4sbs(<16 x i8> %0, <4 x i32> zeroinitializer)
  ret <4 x i32> %1
}

define <4 x i32> @test8(<8 x i16> %0) {
; CHECK-LABEL: test8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xxlxor v3, v3, v3
; CHECK-NEXT:    vsum4shs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4shs(<8 x i16> %0, <4 x i32> zeroinitializer)
  ret <4 x i32> %1
}

define <4 x i32> @test9(<16 x i8> %0) {
; CHECK-LABEL: test9:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xxlxor v3, v3, v3
; CHECK-NEXT:    vsum4ubs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %1 = tail call <4 x i32> @llvm.ppc.altivec.vsum4ubs(<16 x i8> %0, <4 x i32> zeroinitializer)
  ret <4 x i32> %1
}

define <4 x i32> @test10(<16 x i8> %0, <16 x i8> %1) {
; CHECK-LABEL: test10:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xxlxor v3, v3, v3
; CHECK-NEXT:    vsum4sbs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %2 = tail call <4 x i32> @llvm.ppc.altivec.vsum4sbs(<16 x i8> %0, <4 x i32> zeroinitializer)
  %3 = tail call <4 x i32> @llvm.ppc.altivec.vsum4sbs(<16 x i8> %1, <4 x i32> zeroinitializer)
  ret <4 x i32> %2
}

define <4 x i32> @test11(<8 x i16> %0, <8 x i16> %1) {
; CHECK-LABEL: test11:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xxlxor v3, v3, v3
; CHECK-NEXT:    vsum4shs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %2 = tail call <4 x i32> @llvm.ppc.altivec.vsum4shs(<8 x i16> %0, <4 x i32> zeroinitializer)
  %3 = tail call <4 x i32> @llvm.ppc.altivec.vsum4shs(<8 x i16> %1, <4 x i32> zeroinitializer)
  ret <4 x i32> %2
}

define <4 x i32> @test12(<16 x i8> %0, <16 x i8> %1) {
; CHECK-LABEL: test12:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    xxlxor v3, v3, v3
; CHECK-NEXT:    vsum4ubs v2, v2, v3
; CHECK-NEXT:    blr
entry:
  %2 = tail call <4 x i32> @llvm.ppc.altivec.vsum4ubs(<16 x i8> %0, <4 x i32> zeroinitializer)
  %3 = tail call <4 x i32> @llvm.ppc.altivec.vsum4ubs(<16 x i8> %1, <4 x i32> zeroinitializer)
  ret <4 x i32> %2
}

declare <4 x i32> @llvm.ppc.altivec.vsum4sbs(<16 x i8>, <4 x i32>)
declare <4 x i32> @llvm.ppc.altivec.vsum4shs(<8 x i16>, <4 x i32>)
declare <4 x i32> @llvm.ppc.altivec.vsum4ubs(<16 x i8>, <4 x i32>)