; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s --mattr=+sve -o - | FileCheck %s

target triple = "aarch64-unknown-linux-gnu"

; a * b + c
define <vscale x 4 x double> @mull_add(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c) {
; CHECK-LABEL: mull_add:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uzp1 z6.d, z0.d, z1.d
; CHECK-NEXT:    uzp2 z7.d, z2.d, z3.d
; CHECK-NEXT:    uzp2 z0.d, z0.d, z1.d
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    uzp1 z1.d, z2.d, z3.d
; CHECK-NEXT:    fmul z2.d, z6.d, z7.d
; CHECK-NEXT:    fmul z3.d, z0.d, z7.d
; CHECK-NEXT:    fmad z0.d, p0/m, z1.d, z2.d
; CHECK-NEXT:    fnmsb z1.d, p0/m, z6.d, z3.d
; CHECK-NEXT:    uzp2 z2.d, z4.d, z5.d
; CHECK-NEXT:    uzp1 z3.d, z4.d, z5.d
; CHECK-NEXT:    fadd z2.d, z0.d, z2.d
; CHECK-NEXT:    fadd z1.d, z3.d, z1.d
; CHECK-NEXT:    zip1 z0.d, z1.d, z2.d
; CHECK-NEXT:    zip2 z1.d, z1.d, z2.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec29 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec29, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec29, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec31 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec31, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec31, 1
  %12 = fadd contract <vscale x 2 x double> %10, %9
  %13 = fadd contract <vscale x 2 x double> %6, %11
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %12, <vscale x 2 x double> %13)
  ret <vscale x 4 x double> %interleaved.vec
}

; a * b + c * d
define <vscale x 4 x double> @mul_add_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_add_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z24.d, #0 // =0x0
; CHECK-NEXT:    mov z25.d, z24.d
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    mov z27.d, z24.d
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #0
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #0
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #0
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #0
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #90
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #90
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #90
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #90
; CHECK-NEXT:    fadd z0.d, z25.d, z27.d
; CHECK-NEXT:    fadd z1.d, z26.d, z24.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec52 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec54 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 1
  %strided.vec56 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 0
  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 1
  %14 = fmul contract <vscale x 2 x double> %10, %13
  %15 = fmul contract <vscale x 2 x double> %11, %12
  %16 = fadd contract <vscale x 2 x double> %15, %14
  %17 = fmul contract <vscale x 2 x double> %10, %12
  %18 = fmul contract <vscale x 2 x double> %11, %13
  %19 = fsub contract <vscale x 2 x double> %17, %18
  %20 = fadd contract <vscale x 2 x double> %9, %19
  %21 = fadd contract <vscale x 2 x double> %6, %16
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %20, <vscale x 2 x double> %21)
  ret <vscale x 4 x double> %interleaved.vec
}
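
; How these tests are expected to fold (an informal summary, using the
; notation x = x_r + i*x_i for the two lanes produced by deinterleave2):
; the IR above spells out a complex multiply as
;   (a * b)_r = a_r*b_r - a_i*b_i   (fmul, fmul, fsub)
;   (a * b)_i = a_r*b_i + a_i*b_r   (fmul, fmul, fadd)
; A pair of FCMLA instructions with rotations #0 and #90 accumulates exactly
; this product, which is why mul_add_mull lowers each of its two multiplies
; to two fcmla ops followed by a plain fadd for the final sum.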

; a * b - c * d
define <vscale x 4 x double> @mul_sub_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_sub_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z24.d, #0 // =0x0
; CHECK-NEXT:    mov z25.d, z24.d
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    mov z27.d, z24.d
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #0
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #0
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #0
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #0
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #90
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #90
; CHECK-NEXT:    fcmla z27.d, p0/m, z6.d, z4.d, #90
; CHECK-NEXT:    fcmla z24.d, p0/m, z7.d, z5.d, #90
; CHECK-NEXT:    fsub z0.d, z25.d, z27.d
; CHECK-NEXT:    fsub z1.d, z26.d, z24.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec52 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec52, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec54 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec54, 1
  %strided.vec56 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 0
  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec56, 1
  %14 = fmul contract <vscale x 2 x double> %10, %13
  %15 = fmul contract <vscale x 2 x double> %11, %12
  %16 = fadd contract <vscale x 2 x double> %15, %14
  %17 = fmul contract <vscale x 2 x double> %10, %12
  %18 = fmul contract <vscale x 2 x double> %11, %13
  %19 = fsub contract <vscale x 2 x double> %17, %18
  %20 = fsub contract <vscale x 2 x double> %9, %19
  %21 = fsub contract <vscale x 2 x double> %6, %16
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %20, <vscale x 2 x double> %21)
  ret <vscale x 4 x double> %interleaved.vec
}

; a * b + conj(c) * d
define <vscale x 4 x double> @mul_conj_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_conj_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    mov z24.d, #0 // =0x0
; CHECK-NEXT:    mov z25.d, z24.d
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    mov z27.d, z24.d
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #0
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #0
; CHECK-NEXT:    fcmla z27.d, p0/m, z4.d, z6.d, #0
; CHECK-NEXT:    fcmla z24.d, p0/m, z5.d, z7.d, #0
; CHECK-NEXT:    fcmla z25.d, p0/m, z2.d, z0.d, #90
; CHECK-NEXT:    fcmla z26.d, p0/m, z3.d, z1.d, #90
; CHECK-NEXT:    fcmla z27.d, p0/m, z4.d, z6.d, #270
; CHECK-NEXT:    fcmla z24.d, p0/m, z5.d, z7.d, #270
; CHECK-NEXT:    fadd z0.d, z25.d, z27.d
; CHECK-NEXT:    fadd z1.d, z26.d, z24.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec60 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec60, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec60, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec62 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec62, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec62, 1
  %strided.vec64 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %12 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec64, 0
  %13 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec64, 1
  %14 = fmul contract <vscale x 2 x double> %10, %13
  %15 = fmul contract <vscale x 2 x double> %11, %12
  %16 = fsub contract <vscale x 2 x double> %14, %15
  %17 = fmul contract <vscale x 2 x double> %10, %12
  %18 = fmul contract <vscale x 2 x double> %11, %13
  %19 = fadd contract <vscale x 2 x double> %17, %18
  %20 = fadd contract <vscale x 2 x double> %9, %19
  %21 = fadd contract <vscale x 2 x double> %6, %16
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %20, <vscale x 2 x double> %21)
  ret <vscale x 4 x double> %interleaved.vec
}
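
; Conjugation only negates the imaginary half, conj(c) = c_r - i*c_i, so the
; second product in mul_conj_mull expands as
;   (conj(c) * d)_r = c_r*d_r + c_i*d_i
;   (conj(c) * d)_i = c_r*d_i - c_i*d_r
; matching the fadd/fsub orientation of %19 and %16 above. In the generated
; code this shows up as the fcmla operands being swapped and rotation #270
; used where the unconjugated products use #90.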

; a * b + 1i * c * d
define <vscale x 4 x double> @mul_add_rot_mull(<vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x double> %c, <vscale x 4 x double> %d) {
; CHECK-LABEL: mul_add_rot_mull:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    uzp2 z24.d, z4.d, z5.d
; CHECK-NEXT:    mov z25.d, #0 // =0x0
; CHECK-NEXT:    uzp1 z4.d, z4.d, z5.d
; CHECK-NEXT:    ptrue p0.d
; CHECK-NEXT:    and z25.d, z25.d, #0x7fffffffffffffff
; CHECK-NEXT:    mov z26.d, z24.d
; CHECK-NEXT:    and z26.d, z26.d, #0x8000000000000000
; CHECK-NEXT:    orr z5.d, z25.d, z26.d
; CHECK-NEXT:    fadd z5.d, z4.d, z5.d
; CHECK-NEXT:    and z4.d, z4.d, #0x8000000000000000
; CHECK-NEXT:    orr z4.d, z25.d, z4.d
; CHECK-NEXT:    uzp2 z25.d, z0.d, z1.d
; CHECK-NEXT:    uzp1 z0.d, z0.d, z1.d
; CHECK-NEXT:    uzp2 z1.d, z2.d, z3.d
; CHECK-NEXT:    uzp1 z2.d, z2.d, z3.d
; CHECK-NEXT:    fsub z4.d, z4.d, z24.d
; CHECK-NEXT:    uzp2 z24.d, z6.d, z7.d
; CHECK-NEXT:    uzp1 z6.d, z6.d, z7.d
; CHECK-NEXT:    fmul z3.d, z0.d, z1.d
; CHECK-NEXT:    fmul z1.d, z25.d, z1.d
; CHECK-NEXT:    fmul z7.d, z4.d, z24.d
; CHECK-NEXT:    fmul z24.d, z5.d, z24.d
; CHECK-NEXT:    fmla z3.d, p0/m, z25.d, z2.d
; CHECK-NEXT:    fnmsb z0.d, p0/m, z2.d, z1.d
; CHECK-NEXT:    movprfx z1, z7
; CHECK-NEXT:    fmla z1.d, p0/m, z6.d, z5.d
; CHECK-NEXT:    movprfx z2, z24
; CHECK-NEXT:    fnmls z2.d, p0/m, z4.d, z6.d
; CHECK-NEXT:    fadd z2.d, z0.d, z2.d
; CHECK-NEXT:    fadd z1.d, z3.d, z1.d
; CHECK-NEXT:    zip1 z0.d, z2.d, z1.d
; CHECK-NEXT:    zip2 z1.d, z2.d, z1.d
; CHECK-NEXT:    ret
entry:
  %strided.vec = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %a)
  %0 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 0
  %1 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec, 1
  %strided.vec78 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %b)
  %2 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec78, 0
  %3 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec78, 1
  %4 = fmul contract <vscale x 2 x double> %0, %3
  %5 = fmul contract <vscale x 2 x double> %1, %2
  %6 = fadd contract <vscale x 2 x double> %5, %4
  %7 = fmul contract <vscale x 2 x double> %0, %2
  %8 = fmul contract <vscale x 2 x double> %1, %3
  %9 = fsub contract <vscale x 2 x double> %7, %8
  %strided.vec80 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %c)
  %10 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec80, 0
  %11 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec80, 1
  %12 = tail call contract <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> %11)
  %13 = fadd contract <vscale x 2 x double> %10, %12
  %14 = tail call contract <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double> zeroinitializer, <vscale x 2 x double> %10)
  %15 = fsub contract <vscale x 2 x double> %14, %11
  %strided.vec82 = tail call { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double> %d)
  %16 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec82, 0
  %17 = extractvalue { <vscale x 2 x double>, <vscale x 2 x double> } %strided.vec82, 1
  %18 = fmul contract <vscale x 2 x double> %15, %17
  %19 = fmul contract <vscale x 2 x double> %16, %13
  %20 = fadd contract <vscale x 2 x double> %19, %18
  %21 = fmul contract <vscale x 2 x double> %15, %16
  %22 = fmul contract <vscale x 2 x double> %13, %17
  %23 = fsub contract <vscale x 2 x double> %21, %22
  %24 = fadd contract <vscale x 2 x double> %9, %23
  %25 = fadd contract <vscale x 2 x double> %6, %20
  %interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %24, <vscale x 2 x double> %25)
  ret <vscale x 4 x double> %interleaved.vec
}

declare { <vscale x 2 x double>, <vscale x 2 x double> } @llvm.experimental.vector.deinterleave2.nxv4f64(<vscale x 4 x double>)
declare <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double>, <vscale x 2 x double>)
declare <vscale x 2 x double> @llvm.copysign.nxv2f64(<vscale x 2 x double>, <vscale x 2 x double>)
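
; Note on mul_add_rot_mull: multiplying by 1i rotates a complex value by 90
; degrees, (c_r + i*c_i) * 1i = -c_i + i*c_r. The IR builds that negation out
; of copysign-with-zero (%12 through %15) to stay correct for signed zeros,
; and, as the CHECK lines show, that form is not folded into fcmla; the test
; lowers to the explicit and/orr sign manipulation plus uzp/fmul/fmla
; sequences instead.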