; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve2p1 -mattr=+b16b16 -mattr=+use-experimental-zeroing-pseudos -verify-machineinstrs < %s \
; RUN:   | FileCheck %s

; Merging (predicated) bfloat16 multiply.
define <vscale x 8 x bfloat> @bfmul_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_pred:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

; Zeroing the inactive lanes of %a selects the zeroing pseudo (movprfx + bfmul).
define <vscale x 8 x bfloat> @bfmul_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_zeroing:
; CHECK:       // %bb.0:
; CHECK-NEXT:    movprfx z0.h, p0/z, z0.h
; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a_z, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

; The "don't care about inactive lanes" (.u) intrinsic with an arbitrary predicate.
define <vscale x 8 x bfloat> @bfmul_u_pred(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_u_pred:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

; With an all-true predicate the .u intrinsic lowers to the unpredicated form.
define <vscale x 8 x bfloat> @bfmul_u(<vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_u:
; CHECK:       // %bb.0:
; CHECK-NEXT:    bfmul z0.h, z0.h, z1.h
; CHECK-NEXT:    ret
  %elt = call <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 31)
  %res = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %elt, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %res
}

; For the .u intrinsic the zeroing select is kept as an explicit sel.
define <vscale x 8 x bfloat> @bfmul_u_zeroing(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> %b) {
; CHECK-LABEL: bfmul_u_zeroing:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z2.h, #0 // =0x0
; CHECK-NEXT:    sel z0.h, p0, z0.h, z2.h
; CHECK-NEXT:    bfmul z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT:    ret
  %a_z = select <vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a, <vscale x 8 x bfloat> zeroinitializer
  %out = call <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1> %pg, <vscale x 8 x bfloat> %a_z, <vscale x 8 x bfloat> %b)
  ret <vscale x 8 x bfloat> %out
}

declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x bfloat> @llvm.aarch64.sve.fmul.u.nxv8bf16(<vscale x 8 x i1>, <vscale x 8 x bfloat>, <vscale x 8 x bfloat>)
declare <vscale x 8 x i1> @llvm.aarch64.sve.ptrue.nxv8i1(i32 immarg)