; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+v -target-abi=lp64d -verify-machineinstrs < %s | FileCheck %s

; Scalable-vector VP intrinsics under test: the element type <vscale x 1 x double>
; is fixed by the .nxv1f64 mangling suffix and by the e64/m1 vsetvli in the
; CHECK lines; the mask type is correspondingly <vscale x 1 x i1>.
declare <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
declare <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)

; (fadd (fmul x, y), z)) -> (fma x, y, z)
define <vscale x 1 x double> @fma(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; (fadd z, (fmul x, y))) -> (fma x, y, z)
define <vscale x 1 x double> @fma_commute(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_commute:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %z, <vscale x 1 x double> %1, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; Test operand with true mask
define <vscale x 1 x double> @fma_true(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_true:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i1> poison, i1 true, i32 0
  %true = shufflevector <vscale x 1 x i1> %head, <vscale x 1 x i1> poison, <vscale x 1 x i32> zeroinitializer
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x i1> %true, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

; Test operand with normal opcode.
define <vscale x 1 x double> @fma_nonvp(<vscale x 1 x double> %x, <vscale x 1 x double> %y, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_nonvp:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v9, v8, v10, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = fmul fast <vscale x 1 x double> %x, %y
  %2 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %z, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %2
}

define <vscale x 1 x double> @fma_reassociate(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x double> %c, <vscale x 1 x double> %d, <vscale x 1 x double> %e, <vscale x 1 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: fma_reassociate:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vfmadd.vv v11, v10, v12, v0.t
; CHECK-NEXT:    vfmadd.vv v9, v8, v11, v0.t
; CHECK-NEXT:    vmv.v.v v8, v9
; CHECK-NEXT:    ret
  %1 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %vl)
  %2 = call fast <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %c, <vscale x 1 x double> %d, <vscale x 1 x i1> %m, i32 %vl)
  %3 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %m, i32 %vl)
  %4 = call fast <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %3, <vscale x 1 x double> %e, <vscale x 1 x i1> %m, i32 %vl)
  ret <vscale x 1 x double> %4
}