; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=aarch64-linux-gnu -mattr=+sme2 -mattr=+sme-f64f64 -verify-machineinstrs | FileCheck %s

; FMLA (SINGLE)

define void @multi_vector_add_single_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg1x2_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmla za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
; CHECK-NEXT:    fmla za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm)
  ret void
}

define void @multi_vector_add_single_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm) {
; CHECK-LABEL: multi_vector_add_single_vg1x2_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmla za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d
; CHECK-NEXT:    fmla za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x2.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm)
  ret void
}

define void @multi_vector_add_single_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_add_single_vg1x4_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmla za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s
; CHECK-NEXT:    fmla za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s
; CHECK-NEXT:    ret
                                             <vscale x 4 x float> %zm) {
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm)
  ret void
}

define void @multi_vector_add_single_vg1x4_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3,
; CHECK-LABEL: multi_vector_add_single_vg1x4_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmla za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d
; CHECK-NEXT:    fmla za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d
; CHECK-NEXT:    ret
                                             <vscale x 2 x double> %zm) {
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.single.vg1x4.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm)
  ret void
}

; FMLS (SINGLE)

define void @multi_vector_sub_single_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg1x2_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmls za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s
; CHECK-NEXT:    fmls za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm)
  ret void
}

define void @multi_vector_sub_single_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm) {
; CHECK-LABEL: multi_vector_sub_single_vg1x2_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmls za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d
; CHECK-NEXT:    fmls za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x2.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm)
  ret void
}

define void @multi_vector_sub_single_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_sub_single_vg1x4_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmls za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s
; CHECK-NEXT:    fmls za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s
; CHECK-NEXT:    ret
                                             <vscale x 4 x float> %zm) {
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm)
  ret void
}

define void @multi_vector_sub_single_vg1x4_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3,
; CHECK-LABEL: multi_vector_sub_single_vg1x4_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmls za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d
; CHECK-NEXT:    fmls za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d
; CHECK-NEXT:    ret
                                             <vscale x 2 x double> %zm) {
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.single.vg1x4.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm)
  ret void
}

; FMLA (MULTI)

define void @multi_vector_add_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1,
; CHECK-LABEL: multi_vector_add_vg1x2_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmla za.s[w8, 0, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    fmla za.s[w8, 7, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    ret
                                      <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) {
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
  ret void
}

define void @multi_vector_add_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1,
; CHECK-LABEL: multi_vector_add_vg1x2_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmla za.d[w8, 0, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
; CHECK-NEXT:    fmla za.d[w8, 7, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
; CHECK-NEXT:    ret
                                      <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2) {
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
  ret void
}

; Test to ensure the correct register class is used (first register in the list should be a multiple of 2)
define void @multi_vector_add_vg1x2_s_regclass(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1,
; CHECK-LABEL: multi_vector_add_vg1x2_s_regclass:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z4.d, z3.d
; CHECK-NEXT:    mov z6.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z5.d, z2.d
; CHECK-NEXT:    mov z7.d, z0.d
; CHECK-NEXT:    fmla za.s[w8, 0, vgx2], { z6.s, z7.s }, { z4.s, z5.s }
; CHECK-NEXT:    ret
                                               <vscale x 4 x float> %zm0, <vscale x 4 x float> %zm1) {
  call void @llvm.aarch64.sme.fmla.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm0)
  ret void
}

define void @multi_vector_add_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_add_vg1x4_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmla za.s[w8, 0, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    fmla za.s[w8, 7, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
                                      <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) {
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
  ret void
}

define void @multi_vector_add_vg1x4_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3,
; CHECK-LABEL: multi_vector_add_vg1x4_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmla za.d[w8, 0, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
; CHECK-NEXT:    fmla za.d[w8, 7, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
; CHECK-NEXT:    ret
                                      <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4) {
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
  ret void
}

; Test to ensure the correct register class is used (first register in the list should be a multiple of 4)
define void @multi_vector_add_vg1x4_s_regclass(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_add_vg1x4_s_regclass:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z7.d
; CHECK-NEXT:    mov z30.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z6.d
; CHECK-NEXT:    mov z29.d, z2.d
; CHECK-NEXT:    mov z24.d, z5.d
; CHECK-NEXT:    mov z28.d, z1.d
; CHECK-NEXT:    mov z27.d, z4.d
; CHECK-NEXT:    mov z31.d, z0.d
; CHECK-NEXT:    fmla za.s[w8, 0, vgx4], { z28.s - z31.s }, { z24.s - z27.s }
; CHECK-NEXT:    ret
                                               <vscale x 4 x float> %zm0, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3) {
  call void @llvm.aarch64.sme.fmla.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm0)
  ret void
}

; FMLS (MULTI)

define void @multi_vector_sub_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1,
; CHECK-LABEL: multi_vector_sub_vg1x2_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmls za.s[w8, 0, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    fmls za.s[w8, 7, vgx2], { z0.s, z1.s }, { z2.s, z3.s }
; CHECK-NEXT:    ret
                                      <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2) {
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2)
  ret void
}

define void @multi_vector_sub_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1,
; CHECK-LABEL: multi_vector_sub_vg1x2_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z2_z3 def $z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmls za.d[w8, 0, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
; CHECK-NEXT:    fmls za.d[w8, 7, vgx2], { z0.d, z1.d }, { z2.d, z3.d }
; CHECK-NEXT:    ret
                                      <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2) {
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x2.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2)
  ret void
}

define void @multi_vector_sub_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_sub_vg1x4_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmls za.s[w8, 0, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    fmls za.s[w8, 7, vgx4], { z0.s - z3.s }, { z4.s - z7.s }
; CHECK-NEXT:    ret
                                      <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4) {
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm1, <vscale x 4 x float> %zm2, <vscale x 4 x float> %zm3, <vscale x 4 x float> %zm4)
  ret void
}

define void @multi_vector_sub_vg1x4_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3,
; CHECK-LABEL: multi_vector_sub_vg1x4_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z7 killed $z7 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z6 killed $z6 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z5 killed $z5 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z4 killed $z4 killed $z4_z5_z6_z7 def $z4_z5_z6_z7
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmls za.d[w8, 0, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
; CHECK-NEXT:    fmls za.d[w8, 7, vgx4], { z0.d - z3.d }, { z4.d - z7.d }
; CHECK-NEXT:    ret
                                      <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4) {
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.vg1x4.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm1, <vscale x 2 x double> %zm2, <vscale x 2 x double> %zm3, <vscale x 2 x double> %zm4)
  ret void
}

; FMLA (INDEXED)

define void @multi_vector_add_lane_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg1x2_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmla za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s[3]
; CHECK-NEXT:    fmla za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm, i32 3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm, i32 3)
  ret void
}

define void @multi_vector_add_lane_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg1x2_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmla za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d[1]
; CHECK-NEXT:    fmla za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d[1]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm, i32 1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm, i32 1)
  ret void
}

; Test to ensure the correct register class is used (first register in the list should be a multiple of 2)
define void @multi_vector_add_lane_vg1x2_s_regclass(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm) {
; CHECK-LABEL: multi_vector_add_lane_vg1x2_s_regclass:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z4.d, z1.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z5.d, z0.d
; CHECK-NEXT:    fmla za.s[w8, 0, vgx2], { z4.s, z5.s }, z2.s[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zm, i32 3)
  ret void
}

define void @multi_vector_add_lane_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_add_lane_vg1x4_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmla za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s[3]
; CHECK-NEXT:    fmla za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s[3]
; CHECK-NEXT:    ret
                                      <vscale x 4 x float> %zm) {
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm, i32 3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm, i32 3)
  ret void
}

define void @multi_vector_add_lane_vg1x4_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3,
; CHECK-LABEL: multi_vector_add_lane_vg1x4_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmla za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d[1]
; CHECK-NEXT:    fmla za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d[1]
; CHECK-NEXT:    ret
                                      <vscale x 2 x double> %zm) {
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm, i32 1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm, i32 1)
  ret void
}

; Test to ensure the correct register class is used (first register in the list should be a multiple of 4)
define void @multi_vector_add_lane_vg1x4_s_regclass(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_add_lane_vg1x4_s_regclass:
; CHECK:       // %bb.0:
; CHECK-NEXT:    mov z26.d, z3.d
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    mov z25.d, z2.d
; CHECK-NEXT:    mov z24.d, z1.d
; CHECK-NEXT:    mov z27.d, z0.d
; CHECK-NEXT:    fmla za.s[w8, 0, vgx4], { z24.s - z27.s }, z4.s[3]
; CHECK-NEXT:    ret
                                                    <vscale x 4 x float> %zm) {
  call void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zm, i32 3)
  ret void
}

; FMLS (INDEXED)

define void @multi_vector_sub_lane_vg1x2_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg1x2_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmls za.s[w8, 0, vgx2], { z0.s, z1.s }, z2.s[3]
; CHECK-NEXT:    fmls za.s[w8, 7, vgx2], { z0.s, z1.s }, z2.s[3]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm, i32 3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zm, i32 3)
  ret void
}

define void @multi_vector_sub_lane_vg1x2_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm) {
; CHECK-LABEL: multi_vector_sub_lane_vg1x2_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1 def $z0_z1
; CHECK-NEXT:    fmls za.d[w8, 0, vgx2], { z0.d, z1.d }, z2.d[1]
; CHECK-NEXT:    fmls za.d[w8, 7, vgx2], { z0.d, z1.d }, z2.d[1]
; CHECK-NEXT:    ret
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm, i32 1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zm, i32 1)
  ret void
}

define void @multi_vector_sub_lane_vg1x4_s(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3,
; CHECK-LABEL: multi_vector_sub_lane_vg1x4_s:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmls za.s[w8, 0, vgx4], { z0.s - z3.s }, z4.s[3]
; CHECK-NEXT:    fmls za.s[w8, 7, vgx4], { z0.s - z3.s }, z4.s[3]
; CHECK-NEXT:    ret
                                      <vscale x 4 x float> %zm) {
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv4f32(i32 %slice, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm, i32 3)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv4f32(i32 %slice.7, <vscale x 4 x float> %zn0, <vscale x 4 x float> %zn1, <vscale x 4 x float> %zn2, <vscale x 4 x float> %zn3, <vscale x 4 x float> %zm, i32 3)
  ret void
}

define void @multi_vector_sub_lane_vg1x4_d(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3,
; CHECK-LABEL: multi_vector_sub_lane_vg1x4_d:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $z3 killed $z3 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    mov w8, w0
; CHECK-NEXT:    // kill: def $z2 killed $z2 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z1 killed $z1 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    // kill: def $z0 killed $z0 killed $z0_z1_z2_z3 def $z0_z1_z2_z3
; CHECK-NEXT:    fmls za.d[w8, 0, vgx4], { z0.d - z3.d }, z4.d[1]
; CHECK-NEXT:    fmls za.d[w8, 7, vgx4], { z0.d - z3.d }, z4.d[1]
; CHECK-NEXT:    ret
                                      <vscale x 2 x double> %zm) {
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv2f64(i32 %slice, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm, i32 1)
  %slice.7 = add i32 %slice, 7
  call void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv2f64(i32 %slice.7, <vscale x 2 x double> %zn0, <vscale x 2 x double> %zn1, <vscale x 2 x double> %zn2, <vscale x 2 x double> %zn3, <vscale x 2 x double> %zm, i32 1)
  ret void
}

declare void @llvm.aarch64.sme.fmla.single.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmla.single.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmla.single.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmla.single.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmls.single.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmls.single.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmls.single.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmls.single.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmla.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmla.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmla.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmla.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmls.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmls.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmls.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>)
declare void @llvm.aarch64.sme.fmls.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>)
declare void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
declare void @llvm.aarch64.sme.fmla.lane.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
declare void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
declare void @llvm.aarch64.sme.fmla.lane.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x2.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv4f32(i32, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, <vscale x 4 x float>, i32)
declare void @llvm.aarch64.sme.fmls.lane.vg1x4.nxv2f64(i32, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, <vscale x 2 x double>, i32)