; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve.fp -enable-arm-maskedldst %s -o - | FileCheck %s define arm_aapcs_vfpcc <4 x i32> @gather_inc_mini_4i32(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <4 x i32> %offs) { ; CHECK-LABEL: gather_inc_mini_4i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: movs r1, #4 ; CHECK-NEXT: vadd.i32 q1, q0, r1 ; CHECK-NEXT: vldrw.u32 q0, [r0, q1, uxtw #2] ; CHECK-NEXT: bx lr %1 = add <4 x i32> %offs, %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> , <4 x i32> undef) ret <4 x i32> %wide.masked.gather } define arm_aapcs_vfpcc <4 x i32> @gather_inc_minipred_4i32(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <4 x i32> %offs) { ; CHECK-LABEL: gather_inc_minipred_4i32: ; CHECK: @ %bb.0: ; CHECK-NEXT: movs r1, #4 ; CHECK-NEXT: movw r2, #3855 ; CHECK-NEXT: vadd.i32 q1, q0, r1 ; CHECK-NEXT: vmsr p0, r2 ; CHECK-NEXT: vpst ; CHECK-NEXT: vldrwt.u32 q0, [r0, q1, uxtw #2] ; CHECK-NEXT: bx lr %1 = add <4 x i32> %offs, %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> , <4 x i32> undef) ret <4 x i32> %wide.masked.gather } define arm_aapcs_vfpcc <8 x i16> @gather_inc_mini_8i16(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <8 x i32> %offs) { ; CHECK-LABEL: gather_inc_mini_8i16: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, lr} ; CHECK-NEXT: push {r4, r5, r6, lr} ; CHECK-NEXT: vshl.i32 q1, q1, #1 ; CHECK-NEXT: mov.w r12, #16 ; CHECK-NEXT: vadd.i32 q1, q1, r0 ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vadd.i32 q1, q1, r12 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov r1, lr, d3 ; CHECK-NEXT: vadd.i32 q0, q0, r12 ; CHECK-NEXT: vmov r0, r3, d1 ; CHECK-NEXT: vmov r2, r4, d2 ; CHECK-NEXT: ldrh r6, [r1] ; CHECK-NEXT: vmov r1, r5, d0 ; CHECK-NEXT: ldrh r0, [r0] ; CHECK-NEXT: ldrh r3, [r3] ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r4, [r4] ; CHECK-NEXT: ldrh.w r12, [lr] ; CHECK-NEXT: ldrh r1, [r1] ; CHECK-NEXT: ldrh r5, [r5] ; CHECK-NEXT: vmov.16 q0[0], r1 ; CHECK-NEXT: vmov.16 q0[1], r5 ; CHECK-NEXT: vmov.16 q0[2], r0 ; CHECK-NEXT: vmov.16 q0[3], r3 ; CHECK-NEXT: vmov.16 q0[4], r2 ; CHECK-NEXT: vmov.16 q0[5], r4 ; CHECK-NEXT: vmov.16 q0[6], r6 ; CHECK-NEXT: vmov.16 q0[7], r12 ; CHECK-NEXT: pop {r4, r5, r6, pc} %1 = add <8 x i32> %offs, %2 = getelementptr inbounds i16, ptr %data, <8 x i32> %1 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %2, i32 4, <8 x i1> , <8 x i16> undef) ret <8 x i16> %wide.masked.gather } define arm_aapcs_vfpcc <8 x i16> @gather_inc_minipred_8i16(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <8 x i32> %offs) { ; CHECK-LABEL: gather_inc_minipred_8i16: ; CHECK: @ %bb.0: ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: movs r1, #16 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vshl.i32 q1, q1, #1 ; CHECK-NEXT: vadd.i32 q0, q0, r1 ; CHECK-NEXT: vadd.i32 q1, q1, r0 ; CHECK-NEXT: vmov r2, s0 ; CHECK-NEXT: vadd.i32 q1, q1, r1 ; CHECK-NEXT: vmov r3, s2 ; CHECK-NEXT: vmov r0, s4 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r3, [r3] ; CHECK-NEXT: vmov.16 q0[0], r2 ; CHECK-NEXT: ldrh r0, [r0] ; CHECK-NEXT: vmov.16 q0[2], r3 ; CHECK-NEXT: ldrh r1, [r1] ; CHECK-NEXT: vmov.16 q0[4], r0 ; CHECK-NEXT: vmov.16 q0[6], r1 ; CHECK-NEXT: bx lr %1 = add <8 x i32> %offs, %2 = getelementptr inbounds i16, ptr %data, <8 x i32> %1 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %2, i32 4, <8 x i1> , <8 x i16> undef) ret <8 x i16> %wide.masked.gather } define arm_aapcs_vfpcc <16 x i8> @gather_inc_mini_16i8(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <16 x i32> %offs) { ; CHECK-LABEL: gather_inc_mini_16i8: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r6, r7, lr} ; CHECK-NEXT: push {r4, r5, r6, r7, lr} ; CHECK-NEXT: movs r5, #16 ; CHECK-NEXT: vadd.i32 q3, q3, r0 ; CHECK-NEXT: vadd.i32 q3, q3, r5 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov r1, r2, d7 ; CHECK-NEXT: vadd.i32 q1, q1, r0 ; CHECK-NEXT: vmov r3, r4, d6 ; CHECK-NEXT: vadd.i32 q3, q0, r5 ; CHECK-NEXT: vadd.i32 q0, q2, r0 ; CHECK-NEXT: vadd.i32 q1, q1, r5 ; CHECK-NEXT: vadd.i32 q2, q0, r5 ; CHECK-NEXT: ldrb.w r12, [r1] ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: ldrb.w lr, [r2] ; CHECK-NEXT: ldrb r3, [r4] ; CHECK-NEXT: vmov r2, r4, d6 ; CHECK-NEXT: ldrb r2, [r2] ; CHECK-NEXT: ldrb r4, [r4] ; CHECK-NEXT: vmov.8 q0[0], r2 ; CHECK-NEXT: vmov r2, r6, d5 ; CHECK-NEXT: vmov.8 q0[1], r4 ; CHECK-NEXT: ldrb r4, [r2] ; CHECK-NEXT: ldrb r2, [r6] ; CHECK-NEXT: vmov r6, r7, d7 ; CHECK-NEXT: ldrb r0, [r6] ; CHECK-NEXT: ldrb r7, [r7] ; CHECK-NEXT: vmov.8 q0[2], r0 ; CHECK-NEXT: vmov r0, r5, d2 ; CHECK-NEXT: vmov.8 q0[3], r7 ; CHECK-NEXT: ldrb r0, [r0] ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: vmov.8 q0[4], r0 ; CHECK-NEXT: vmov.8 q0[5], r5 ; CHECK-NEXT: vmov r0, r5, d3 ; CHECK-NEXT: ldrb r0, [r0] ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: vmov.8 q0[6], r0 ; CHECK-NEXT: vmov.8 q0[7], r5 ; CHECK-NEXT: vmov r0, r5, d4 ; CHECK-NEXT: ldrb r0, [r0] ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: vmov.8 q0[8], r0 ; CHECK-NEXT: vmov.8 q0[9], r5 ; CHECK-NEXT: vmov.8 q0[10], r4 ; CHECK-NEXT: vmov.8 q0[11], r2 ; CHECK-NEXT: vmov.8 q0[12], r1 ; CHECK-NEXT: vmov.8 q0[13], r3 ; CHECK-NEXT: vmov.8 q0[14], r12 ; CHECK-NEXT: vmov.8 q0[15], lr ; CHECK-NEXT: pop {r4, r5, r6, r7, pc} %1 = add <16 x i32> %offs, %2 = getelementptr inbounds i8, ptr %data, <16 x i32> %1 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %2, i32 2, <16 x i1> , <16 x i8> undef) ret <16 x i8> %wide.masked.gather } define arm_aapcs_vfpcc <16 x i8> @gather_inc_minipred_16i8(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, <16 x i32> %offs) { ; CHECK-LABEL: gather_inc_minipred_16i8: ; CHECK: @ %bb.0: ; CHECK-NEXT: .save {r4, r5, r7, lr} ; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: movs r1, #16 ; CHECK-NEXT: vadd.i32 q1, q1, r0 ; CHECK-NEXT: vadd.i32 q1, q1, r1 ; CHECK-NEXT: vadd.i32 q2, q2, r0 ; CHECK-NEXT: vmov r2, s4 ; CHECK-NEXT: vadd.i32 q2, q2, r1 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov r3, s10 ; CHECK-NEXT: vadd.i32 q0, q0, r1 ; CHECK-NEXT: vmov r4, s0 ; CHECK-NEXT: vmov r5, s2 ; CHECK-NEXT: ldrb.w r12, [r2] ; CHECK-NEXT: vmov r2, s8 ; CHECK-NEXT: ldrb r3, [r3] ; CHECK-NEXT: ldrb r4, [r4] ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: vmov.8 q0[0], r4 ; CHECK-NEXT: vmov.8 q0[2], r5 ; CHECK-NEXT: vmov.8 q0[4], r12 ; CHECK-NEXT: ldrb.w lr, [r2] ; CHECK-NEXT: vmov r2, s6 ; CHECK-NEXT: vadd.i32 q1, q3, r0 ; CHECK-NEXT: vadd.i32 q1, q1, r1 ; CHECK-NEXT: vmov r0, s4 ; CHECK-NEXT: vmov r1, s6 ; CHECK-NEXT: ldrb r2, [r2] ; CHECK-NEXT: vmov.8 q0[6], r2 ; CHECK-NEXT: vmov.8 q0[8], lr ; CHECK-NEXT: ldrb r0, [r0] ; CHECK-NEXT: vmov.8 q0[10], r3 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q0[12], r0 ; CHECK-NEXT: vmov.8 q0[14], r1 ; CHECK-NEXT: pop {r4, r5, r7, pc} %1 = add <16 x i32> %offs, %2 = getelementptr inbounds i8, ptr %data, <16 x i32> %1 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %2, i32 2, <16 x i1> , <16 x i8> undef) ret <16 x i8> %wide.masked.gather } define arm_aapcs_vfpcc void @gather_pre_inc(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n.vec) { ; CHECK-LABEL: gather_pre_inc: ; CHECK: @ %bb.0: @ %vector.ph ; CHECK-NEXT: adr r3, .LCPI6_0 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: .LBB6_1: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vldrw.u32 q1, [q0, #96]! ; CHECK-NEXT: subs r2, #4 ; CHECK-NEXT: vstrb.8 q1, [r1], #16 ; CHECK-NEXT: bne .LBB6_1 ; CHECK-NEXT: @ %bb.2: @ %end ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.3: ; CHECK-NEXT: .LCPI6_0: ; CHECK-NEXT: .long 4294967224 @ 0xffffffb8 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0 ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8 ; CHECK-NEXT: .long 0 @ 0x0 vector.ph: ; preds = %for.body.preheader br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <4 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = mul <4 x i32> %vec.ind, %1 = add <4 x i32> %0, %2 = getelementptr inbounds i32, ptr %data, <4 x i32> %1 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %2, i32 4, <4 x i1> , <4 x i32> undef) %3 = getelementptr inbounds i32, ptr %dst, i32 %index store <4 x i32> %wide.masked.gather, ptr %3, align 4 %index.next = add i32 %index, 4 %vec.ind.next = add <4 x i32> %vec.ind, %4 = icmp eq i32 %index.next, %n.vec br i1 %4, label %end, label %vector.body end: ret void; } define arm_aapcs_vfpcc void @gather_post_inc(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n.vec43) { ; CHECK-LABEL: gather_post_inc: ; CHECK: @ %bb.0: @ %vector.ph41 ; CHECK-NEXT: adr r3, .LCPI7_0 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: .LBB7_1: @ %vector.body39 ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vldrw.u32 q1, [q0, #96]! ; CHECK-NEXT: subs r2, #4 ; CHECK-NEXT: vstrb.8 q1, [r1], #16 ; CHECK-NEXT: bne .LBB7_1 ; CHECK-NEXT: @ %bb.2: @ %end ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.3: ; CHECK-NEXT: .LCPI7_0: ; CHECK-NEXT: .long 4294967200 @ 0xffffffa0 ; CHECK-NEXT: .long 4294967224 @ 0xffffffb8 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0 ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8 vector.ph41: ; preds = %for.body6.preheader br label %vector.body39 vector.body39: ; preds = %vector.body39, %vector.ph41 %index44 = phi i32 [ 0, %vector.ph41 ], [ %index.next45, %vector.body39 ] %vec.ind50 = phi <4 x i32> [ , %vector.ph41 ], [ %vec.ind.next51, %vector.body39 ] %0 = mul nuw nsw <4 x i32> %vec.ind50, %1 = getelementptr inbounds i32, ptr %data, <4 x i32> %0 %wide.masked.gather55 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> , <4 x i32> undef) %2 = getelementptr inbounds i32, ptr %dst, i32 %index44 store <4 x i32> %wide.masked.gather55, ptr %2, align 4 %index.next45 = add i32 %index44, 4 %vec.ind.next51 = add <4 x i32> %vec.ind50, %3 = icmp eq i32 %index.next45, %n.vec43 br i1 %3, label %end, label %vector.body39 end: ret void; } define arm_aapcs_vfpcc void @gather_inc_v4i32_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v4i32_simple: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: it lt ; CHECK-NEXT: bxlt lr ; CHECK-NEXT: .LBB8_1: @ %vector.ph.preheader ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: bic r12, r2, #3 ; CHECK-NEXT: movs r3, #1 ; CHECK-NEXT: sub.w lr, r12, #4 ; CHECK-NEXT: add.w r4, r3, lr, lsr #2 ; CHECK-NEXT: adr r3, .LCPI8_0 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: .LBB8_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB8_3 Depth 2 ; CHECK-NEXT: dls lr, r4 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: vmov q1, q0 ; CHECK-NEXT: .LBB8_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB8_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vldrw.u32 q2, [q1, #16]! ; CHECK-NEXT: vstrb.8 q2, [r0], #16 ; CHECK-NEXT: le lr, .LBB8_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB8_2 Depth=1 ; CHECK-NEXT: cmp r12, r2 ; CHECK-NEXT: bne .LBB8_2 ; CHECK-NEXT: @ %bb.5: ; CHECK-NEXT: pop.w {r4, lr} ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI8_0: ; CHECK-NEXT: .long 4294967280 @ 0xfffffff0 ; CHECK-NEXT: .long 4294967284 @ 0xfffffff4 ; CHECK-NEXT: .long 4294967288 @ 0xfffffff8 ; CHECK-NEXT: .long 4294967292 @ 0xfffffffc entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -4 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <4 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = getelementptr inbounds i32, ptr %data, <4 x i32> %vec.ind %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %0, i32 4, <4 x i1> , <4 x i32> undef) %1 = getelementptr inbounds i32, ptr %dst, i32 %index store <4 x i32> %wide.masked.gather, ptr %1, align 4 %index.next = add i32 %index, 4 %vec.ind.next = add <4 x i32> %vec.ind, %2 = icmp eq i32 %index.next, %n.vec br i1 %2, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } define arm_aapcs_vfpcc void @gather_inc_v4i32_complex(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v4i32_complex: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: it lt ; CHECK-NEXT: bxlt lr ; CHECK-NEXT: .LBB9_1: @ %vector.ph.preheader ; CHECK-NEXT: .save {r4, r5, r7, lr} ; CHECK-NEXT: push {r4, r5, r7, lr} ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: bic r12, r2, #3 ; CHECK-NEXT: movs r3, #1 ; CHECK-NEXT: sub.w lr, r12, #4 ; CHECK-NEXT: adr r4, .LCPI9_1 ; CHECK-NEXT: adr r5, .LCPI9_2 ; CHECK-NEXT: vldrw.u32 q1, [r4] ; CHECK-NEXT: add.w r3, r3, lr, lsr #2 ; CHECK-NEXT: adr.w lr, .LCPI9_0 ; CHECK-NEXT: vldrw.u32 q0, [r5] ; CHECK-NEXT: vldrw.u32 q2, [lr] ; CHECK-NEXT: vadd.i32 q1, q1, r0 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vadd.i32 q2, q2, r0 ; CHECK-NEXT: .LBB9_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB9_3 Depth 2 ; CHECK-NEXT: dls lr, r3 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: vmov q3, q1 ; CHECK-NEXT: vmov q4, q0 ; CHECK-NEXT: vmov q5, q2 ; CHECK-NEXT: .LBB9_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB9_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vldrw.u32 q6, [q5, #48]! ; CHECK-NEXT: vldrw.u32 q7, [q3, #48]! ; CHECK-NEXT: vadd.i32 q6, q7, q6 ; CHECK-NEXT: vldrw.u32 q7, [q4, #48]! ; CHECK-NEXT: vadd.i32 q6, q6, q7 ; CHECK-NEXT: vstrb.8 q6, [r0], #16 ; CHECK-NEXT: le lr, .LBB9_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB9_2 Depth=1 ; CHECK-NEXT: cmp r12, r2 ; CHECK-NEXT: bne .LBB9_2 ; CHECK-NEXT: @ %bb.5: ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: pop.w {r4, r5, r7, lr} ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI9_0: ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0 ; CHECK-NEXT: .long 4294967260 @ 0xffffffdc ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8 ; CHECK-NEXT: .long 4294967284 @ 0xfffffff4 ; CHECK-NEXT: .LCPI9_1: ; CHECK-NEXT: .long 4294967252 @ 0xffffffd4 ; CHECK-NEXT: .long 4294967264 @ 0xffffffe0 ; CHECK-NEXT: .long 4294967276 @ 0xffffffec ; CHECK-NEXT: .long 4294967288 @ 0xfffffff8 ; CHECK-NEXT: .LCPI9_2: ; CHECK-NEXT: .long 4294967256 @ 0xffffffd8 ; CHECK-NEXT: .long 4294967268 @ 0xffffffe4 ; CHECK-NEXT: .long 4294967280 @ 0xfffffff0 ; CHECK-NEXT: .long 4294967292 @ 0xfffffffc entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -4 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <4 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = mul nuw nsw <4 x i32> %vec.ind, %1 = getelementptr inbounds i32, ptr %data, <4 x i32> %0 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> , <4 x i32> undef) %2 = add nuw nsw <4 x i32> %0, %3 = getelementptr inbounds i32, ptr %data, <4 x i32> %2 %wide.masked.gather24 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %3, i32 4, <4 x i1> , <4 x i32> undef) %4 = add nuw nsw <4 x i32> %0, %5 = getelementptr inbounds i32, ptr %data, <4 x i32> %4 %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %5, i32 4, <4 x i1> , <4 x i32> undef) %6 = add nsw <4 x i32> %wide.masked.gather24, %wide.masked.gather %7 = add nsw <4 x i32> %6, %wide.masked.gather25 %8 = getelementptr inbounds i32, ptr %dst, i32 %index store <4 x i32> %7, ptr %8, align 4 %index.next = add i32 %index, 4 %vec.ind.next = add <4 x i32> %vec.ind, %9 = icmp eq i32 %index.next, %n.vec br i1 %9, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } define arm_aapcs_vfpcc void @gather_inc_v4i32_large(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v4i32_large: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: it lt ; CHECK-NEXT: bxlt lr ; CHECK-NEXT: .LBB10_1: @ %vector.ph.preheader ; CHECK-NEXT: .save {r4, lr} ; CHECK-NEXT: push {r4, lr} ; CHECK-NEXT: bic r12, r2, #3 ; CHECK-NEXT: movs r3, #1 ; CHECK-NEXT: sub.w lr, r12, #4 ; CHECK-NEXT: add.w r4, r3, lr, lsr #2 ; CHECK-NEXT: adr r3, .LCPI10_0 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: .LBB10_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB10_3 Depth 2 ; CHECK-NEXT: dls lr, r4 ; CHECK-NEXT: mov r0, r1 ; CHECK-NEXT: vmov q1, q0 ; CHECK-NEXT: .LBB10_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB10_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vldrw.u32 q2, [q1, #508]! ; CHECK-NEXT: vstrb.8 q2, [r0], #16 ; CHECK-NEXT: le lr, .LBB10_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB10_2 Depth=1 ; CHECK-NEXT: cmp r12, r2 ; CHECK-NEXT: bne .LBB10_2 ; CHECK-NEXT: @ %bb.5: ; CHECK-NEXT: pop.w {r4, lr} ; CHECK-NEXT: bx lr ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI10_0: ; CHECK-NEXT: .long 4294966788 @ 0xfffffe04 ; CHECK-NEXT: .long 4294966792 @ 0xfffffe08 ; CHECK-NEXT: .long 4294966796 @ 0xfffffe0c ; CHECK-NEXT: .long 4294966800 @ 0xfffffe10 entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -4 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <4 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = getelementptr inbounds i32, ptr %data, <4 x i32> %vec.ind %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %0, i32 4, <4 x i1> , <4 x i32> undef) %1 = getelementptr inbounds i32, ptr %dst, i32 %index store <4 x i32> %wide.masked.gather, ptr %1, align 4 %index.next = add i32 %index, 4 %vec.ind.next = add <4 x i32> %vec.ind, %2 = icmp eq i32 %index.next, %n.vec br i1 %2, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } ; TODO: uneven - I think it's not possible to create such an example, because vec.ind will always be increased by a vector with 4 elements (=> x*4 = even) ; TODO: What is sxth? define arm_aapcs_vfpcc void @gather_inc_v8i16_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v8i16_simple: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #28 ; CHECK-NEXT: sub sp, #28 ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: mov r1, r2 ; CHECK-NEXT: str r2, [sp, #8] @ 4-byte Spill ; CHECK-NEXT: blt .LBB11_5 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader ; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: movs r6, #1 ; CHECK-NEXT: add r2, sp, #12 ; CHECK-NEXT: mov.w r9, #8 ; CHECK-NEXT: bic r1, r1, #7 ; CHECK-NEXT: str r1, [sp] @ 4-byte Spill ; CHECK-NEXT: sub.w r3, r1, #8 ; CHECK-NEXT: add.w r8, r6, r3, lsr #3 ; CHECK-NEXT: adr r3, .LCPI11_0 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: .LBB11_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB11_3 Depth 2 ; CHECK-NEXT: dls lr, r8 ; CHECK-NEXT: vmov q1, q0 ; CHECK-NEXT: ldr r6, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: .LBB11_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB11_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vstrw.32 q1, [r2] ; CHECK-NEXT: mov r12, r2 ; CHECK-NEXT: vldrh.s32 q2, [r2, #8] ; CHECK-NEXT: vadd.i16 q1, q1, r9 ; CHECK-NEXT: vshl.i32 q2, q2, #1 ; CHECK-NEXT: vadd.i32 q2, q2, r0 ; CHECK-NEXT: vmov r7, r5, d5 ; CHECK-NEXT: vmov r3, r4, d4 ; CHECK-NEXT: vldrh.s32 q2, [r2] ; CHECK-NEXT: vshl.i32 q2, q2, #1 ; CHECK-NEXT: vadd.i32 q2, q2, r0 ; CHECK-NEXT: vmov r1, r10, d5 ; CHECK-NEXT: ldrh r7, [r7] ; CHECK-NEXT: ldrh r4, [r4] ; CHECK-NEXT: ldrh r5, [r5] ; CHECK-NEXT: ldrh.w r2, [r10] ; CHECK-NEXT: ldrh.w r10, [r3] ; CHECK-NEXT: vmov r3, r11, d4 ; CHECK-NEXT: ldrh r1, [r1] ; CHECK-NEXT: ldrh r3, [r3] ; CHECK-NEXT: ldrh.w r11, [r11] ; CHECK-NEXT: vmov.16 q2[0], r3 ; CHECK-NEXT: vmov.16 q2[1], r11 ; CHECK-NEXT: vmov.16 q2[2], r1 ; CHECK-NEXT: vmov.16 q2[3], r2 ; CHECK-NEXT: mov r2, r12 ; CHECK-NEXT: vmov.16 q2[4], r10 ; CHECK-NEXT: vmov.16 q2[5], r4 ; CHECK-NEXT: vmov.16 q2[6], r7 ; CHECK-NEXT: vmov.16 q2[7], r5 ; CHECK-NEXT: vstrb.8 q2, [r6], #16 ; CHECK-NEXT: le lr, .LBB11_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB11_2 Depth=1 ; CHECK-NEXT: ldr r1, [sp, #8] @ 4-byte Reload ; CHECK-NEXT: ldr r3, [sp] @ 4-byte Reload ; CHECK-NEXT: cmp r3, r1 ; CHECK-NEXT: bne .LBB11_2 ; CHECK-NEXT: .LBB11_5: @ %for.cond.cleanup ; CHECK-NEXT: add sp, #28 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI11_0: ; CHECK-NEXT: .short 0 @ 0x0 ; CHECK-NEXT: .short 1 @ 0x1 ; CHECK-NEXT: .short 2 @ 0x2 ; CHECK-NEXT: .short 3 @ 0x3 ; CHECK-NEXT: .short 4 @ 0x4 ; CHECK-NEXT: .short 5 @ 0x5 ; CHECK-NEXT: .short 6 @ 0x6 ; CHECK-NEXT: .short 7 @ 0x7 entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -8 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <8 x i16> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = getelementptr inbounds i16, ptr %data, <8 x i16> %vec.ind %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %0, i32 2, <8 x i1> , <8 x i16> undef) %1 = getelementptr inbounds i16, ptr %dst, i32 %index store <8 x i16> %wide.masked.gather, ptr %1, align 2 %index.next = add i32 %index, 8 %vec.ind.next = add <8 x i16> %vec.ind, %2 = icmp eq i32 %index.next, %n.vec br i1 %2, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } ; TODO: This looks absolutely terrifying :( define arm_aapcs_vfpcc void @gather_inc_v8i16_complex(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v8i16_complex: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: .pad #136 ; CHECK-NEXT: sub sp, #136 ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: str r1, [sp, #64] @ 4-byte Spill ; CHECK-NEXT: mov r1, r2 ; CHECK-NEXT: str r2, [sp, #68] @ 4-byte Spill ; CHECK-NEXT: blt.w .LBB12_5 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader ; CHECK-NEXT: ldr r1, [sp, #68] @ 4-byte Reload ; CHECK-NEXT: adr r3, .LCPI12_2 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: movs r2, #1 ; CHECK-NEXT: bic r1, r1, #7 ; CHECK-NEXT: str r1, [sp, #4] @ 4-byte Spill ; CHECK-NEXT: subs r1, #8 ; CHECK-NEXT: vstrw.32 q0, [sp, #40] @ 16-byte Spill ; CHECK-NEXT: vmov.i16 q2, #0x18 ; CHECK-NEXT: add.w r1, r2, r1, lsr #3 ; CHECK-NEXT: str r1, [sp, #60] @ 4-byte Spill ; CHECK-NEXT: adr r1, .LCPI12_0 ; CHECK-NEXT: adr r2, .LCPI12_1 ; CHECK-NEXT: vldrw.u32 q0, [r1] ; CHECK-NEXT: vstrw.32 q2, [sp, #72] @ 16-byte Spill ; CHECK-NEXT: vstrw.32 q0, [sp, #24] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r2] ; CHECK-NEXT: add r2, sp, #120 ; CHECK-NEXT: vstrw.32 q0, [sp, #8] @ 16-byte Spill ; CHECK-NEXT: .LBB12_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB12_3 Depth 2 ; CHECK-NEXT: ldr r1, [sp, #60] @ 4-byte Reload ; CHECK-NEXT: add.w r10, sp, #104 ; CHECK-NEXT: dls lr, r1 ; CHECK-NEXT: ldr r7, [sp, #64] @ 4-byte Reload ; CHECK-NEXT: vldrw.u32 q4, [sp, #24] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q5, [sp, #40] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q6, [sp, #8] @ 16-byte Reload ; CHECK-NEXT: .LBB12_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB12_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vstrw.32 q5, [r2] ; CHECK-NEXT: mov r8, r2 ; CHECK-NEXT: vldrh.s32 q0, [r2, #8] ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov r1, r3, d0 ; CHECK-NEXT: vmov r4, r5, d1 ; CHECK-NEXT: vldrh.s32 q0, [r2] ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vadd.i32 q2, q0, r0 ; CHECK-NEXT: vmov r6, r2, d4 ; CHECK-NEXT: ldrh r1, [r1] ; CHECK-NEXT: ldrh.w r12, [r4] ; CHECK-NEXT: add r4, sp, #88 ; CHECK-NEXT: ldrh.w r11, [r5] ; CHECK-NEXT: ldrh r3, [r3] ; CHECK-NEXT: ldrh r5, [r6] ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: vstrw.32 q6, [r4] ; CHECK-NEXT: vldrh.s32 q0, [r4] ; CHECK-NEXT: vmov.16 q7[0], r5 ; CHECK-NEXT: vmov.16 q7[1], r2 ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov r6, r9, d0 ; CHECK-NEXT: vmov r2, r5, d1 ; CHECK-NEXT: vldrh.s32 q0, [r4, #8] ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: ldrh r6, [r6] ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: vmov.16 q1[0], r6 ; CHECK-NEXT: ldrh.w r6, [r9] ; CHECK-NEXT: ldrh r5, [r5] ; CHECK-NEXT: vmov.16 q1[1], r6 ; CHECK-NEXT: vmov.16 q1[2], r2 ; CHECK-NEXT: vmov r2, r6, d0 ; CHECK-NEXT: vmov.16 q1[3], r5 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r6, [r6] ; CHECK-NEXT: vmov.16 q1[4], r2 ; CHECK-NEXT: vmov r2, r5, d1 ; CHECK-NEXT: vmov.16 q1[5], r6 ; CHECK-NEXT: mov r6, r10 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r5, [r5] ; CHECK-NEXT: vstrw.32 q4, [r10] ; CHECK-NEXT: vldrh.s32 q0, [r6] ; CHECK-NEXT: vmov.16 q1[6], r2 ; CHECK-NEXT: vmov.16 q1[7], r5 ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov r2, r5, d0 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r5, [r5] ; CHECK-NEXT: vmov.16 q3[0], r2 ; CHECK-NEXT: vmov.16 q3[1], r5 ; CHECK-NEXT: vmov r2, r5, d5 ; CHECK-NEXT: vldrw.u32 q2, [sp, #72] @ 16-byte Reload ; CHECK-NEXT: vadd.i16 q6, q6, q2 ; CHECK-NEXT: vadd.i16 q5, q5, q2 ; CHECK-NEXT: vadd.i16 q4, q4, q2 ; CHECK-NEXT: ldrh.w r9, [r2] ; CHECK-NEXT: vmov r2, r4, d1 ; CHECK-NEXT: vldrh.s32 q0, [r6, #8] ; CHECK-NEXT: ldrh r5, [r5] ; CHECK-NEXT: vmov.16 q7[2], r9 ; CHECK-NEXT: vshl.i32 q0, q0, #1 ; CHECK-NEXT: vmov.16 q7[3], r5 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov.16 q7[4], r1 ; CHECK-NEXT: vmov.16 q7[5], r3 ; CHECK-NEXT: vmov.16 q7[6], r12 ; CHECK-NEXT: vmov.16 q7[7], r11 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r4, [r4] ; CHECK-NEXT: vmov.16 q3[2], r2 ; CHECK-NEXT: vmov.16 q3[3], r4 ; CHECK-NEXT: vmov r2, r4, d0 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r4, [r4] ; CHECK-NEXT: vmov.16 q3[4], r2 ; CHECK-NEXT: vmov.16 q3[5], r4 ; CHECK-NEXT: vmov r2, r4, d1 ; CHECK-NEXT: ldrh r2, [r2] ; CHECK-NEXT: ldrh r4, [r4] ; CHECK-NEXT: vmov.16 q3[6], r2 ; CHECK-NEXT: mov r2, r8 ; CHECK-NEXT: vmov.16 q3[7], r4 ; CHECK-NEXT: vadd.i16 q0, q3, q1 ; CHECK-NEXT: vadd.i16 q0, q0, q7 ; CHECK-NEXT: vstrb.8 q0, [r7], #16 ; CHECK-NEXT: le lr, .LBB12_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB12_2 Depth=1 ; CHECK-NEXT: ldr r1, [sp, #4] @ 4-byte Reload ; CHECK-NEXT: ldr r3, [sp, #68] @ 4-byte Reload ; CHECK-NEXT: cmp r1, r3 ; CHECK-NEXT: bne.w .LBB12_2 ; CHECK-NEXT: .LBB12_5: @ %for.cond.cleanup ; CHECK-NEXT: add sp, #136 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI12_0: ; CHECK-NEXT: .short 1 @ 0x1 ; CHECK-NEXT: .short 4 @ 0x4 ; CHECK-NEXT: .short 7 @ 0x7 ; CHECK-NEXT: .short 10 @ 0xa ; CHECK-NEXT: .short 13 @ 0xd ; CHECK-NEXT: .short 16 @ 0x10 ; CHECK-NEXT: .short 19 @ 0x13 ; CHECK-NEXT: .short 22 @ 0x16 ; CHECK-NEXT: .LCPI12_1: ; CHECK-NEXT: .short 0 @ 0x0 ; CHECK-NEXT: .short 3 @ 0x3 ; CHECK-NEXT: .short 6 @ 0x6 ; CHECK-NEXT: .short 9 @ 0x9 ; CHECK-NEXT: .short 12 @ 0xc ; CHECK-NEXT: .short 15 @ 0xf ; CHECK-NEXT: .short 18 @ 0x12 ; CHECK-NEXT: .short 21 @ 0x15 ; CHECK-NEXT: .LCPI12_2: ; CHECK-NEXT: .short 2 @ 0x2 ; CHECK-NEXT: .short 5 @ 0x5 ; CHECK-NEXT: .short 8 @ 0x8 ; CHECK-NEXT: .short 11 @ 0xb ; CHECK-NEXT: .short 14 @ 0xe ; CHECK-NEXT: .short 17 @ 0x11 ; CHECK-NEXT: .short 20 @ 0x14 ; CHECK-NEXT: .short 23 @ 0x17 entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -8 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <8 x i16> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = mul nuw nsw <8 x i16> %vec.ind, %1 = getelementptr inbounds i16, ptr %data, <8 x i16> %0 %wide.masked.gather = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %1, i32 2, <8 x i1> , <8 x i16> undef) %2 = add nuw nsw <8 x i16> %0, %3 = getelementptr inbounds i16, ptr %data, <8 x i16> %2 %wide.masked.gather24 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %3, i32 2, <8 x i1> , <8 x i16> undef) %4 = add nuw nsw <8 x i16> %0, %5 = getelementptr inbounds i16, ptr %data, <8 x i16> %4 %wide.masked.gather25 = call <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr> %5, i32 2, <8 x i1> , <8 x i16> undef) %6 = add nsw <8 x i16> %wide.masked.gather24, %wide.masked.gather %7 = add nsw <8 x i16> %6, %wide.masked.gather25 %8 = getelementptr inbounds i16, ptr %dst, i32 %index store <8 x i16> %7, ptr %8, align 2 %index.next = add i32 %index, 8 %vec.ind.next = add <8 x i16> %vec.ind, %9 = icmp eq i32 %index.next, %n.vec br i1 %9, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } define arm_aapcs_vfpcc void @gather_inc_v16i8_complex(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v16i8_complex: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: .pad #312 ; CHECK-NEXT: sub sp, #312 ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: str r1, [sp, #116] @ 4-byte Spill ; CHECK-NEXT: blt.w .LBB13_5 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader ; CHECK-NEXT: adr r1, .LCPI13_0 ; CHECK-NEXT: adr r6, .LCPI13_8 ; CHECK-NEXT: vldrw.u32 q0, [r1] ; CHECK-NEXT: adr r1, .LCPI13_1 ; CHECK-NEXT: adr r7, .LCPI13_7 ; CHECK-NEXT: adr r3, .LCPI13_6 ; CHECK-NEXT: vstrw.32 q0, [sp, #96] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r1] ; CHECK-NEXT: adr r1, .LCPI13_5 ; CHECK-NEXT: bic r10, r2, #7 ; CHECK-NEXT: vstrw.32 q0, [sp, #80] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r6] ; CHECK-NEXT: adr r6, .LCPI13_9 ; CHECK-NEXT: vmov.i32 q2, #0x30 ; CHECK-NEXT: vstrw.32 q0, [sp, #64] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r7] ; CHECK-NEXT: vstrw.32 q0, [sp, #48] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r6] ; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r1] ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill ; CHECK-NEXT: .LBB13_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB13_3 Depth 2 ; CHECK-NEXT: adr r1, .LCPI13_3 ; CHECK-NEXT: vldrw.u32 q6, [sp, #16] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q0, [r1] ; CHECK-NEXT: adr r1, .LCPI13_4 ; CHECK-NEXT: vldrw.u32 q5, [r1] ; CHECK-NEXT: adr r1, .LCPI13_2 ; CHECK-NEXT: vldrw.u32 q3, [r1] ; CHECK-NEXT: adr r1, .LCPI13_10 ; CHECK-NEXT: vstrw.32 q6, [sp, #280] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q6, [sp, #32] @ 16-byte Reload ; CHECK-NEXT: vstrw.32 q3, [sp, #296] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [r1] ; CHECK-NEXT: adr r1, .LCPI13_11 ; CHECK-NEXT: ldr.w r8, [sp, #116] @ 4-byte Reload ; CHECK-NEXT: vstrw.32 q3, [sp, #248] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [sp, #80] @ 16-byte Reload ; CHECK-NEXT: vstrw.32 q6, [sp, #264] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q6, [sp, #48] @ 16-byte Reload ; CHECK-NEXT: vstrw.32 q3, [sp, #216] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [sp, #64] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q7, [r1] ; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload ; CHECK-NEXT: vstrw.32 q3, [sp, #200] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [sp, #96] @ 16-byte Reload ; CHECK-NEXT: mov r11, r10 ; CHECK-NEXT: vstrw.32 q6, [sp, #232] @ 16-byte Spill ; CHECK-NEXT: vstrw.32 q3, [sp, #184] @ 16-byte Spill ; CHECK-NEXT: .LBB13_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB13_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vadd.i32 q4, q1, r0 ; CHECK-NEXT: vstrw.32 q7, [sp, #136] @ 16-byte Spill ; CHECK-NEXT: vmov r1, lr, d8 ; CHECK-NEXT: vadd.i32 q7, q7, r0 ; CHECK-NEXT: vmov r5, r4, d15 ; CHECK-NEXT: vadd.i32 q6, q0, r0 ; CHECK-NEXT: vmov r6, r7, d13 ; CHECK-NEXT: vstrw.32 q1, [sp, #152] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q1, [sp, #296] @ 16-byte Reload ; CHECK-NEXT: vstrw.32 q0, [sp, #168] @ 16-byte Spill ; CHECK-NEXT: vmov q0, q2 ; CHECK-NEXT: vmov q3, q5 ; CHECK-NEXT: vadd.i32 q1, q1, r0 ; CHECK-NEXT: vldrw.u32 q0, [sp, #248] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q3, [sp, #216] @ 16-byte Reload ; CHECK-NEXT: vstrw.32 q5, [sp, #120] @ 16-byte Spill ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: subs.w r11, r11, #16 ; CHECK-NEXT: ldrb.w r9, [r1] ; CHECK-NEXT: vmov r1, r3, d14 ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: ldrb r7, [r7] ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[0], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[1], r1 ; CHECK-NEXT: vmov r1, r3, d12 ; CHECK-NEXT: vmov.8 q7[2], r5 ; CHECK-NEXT: ldrb r5, [r6] ; CHECK-NEXT: ldrb r6, [r4] ; CHECK-NEXT: vmov.8 q7[3], r6 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: ldrb r3, [r3] ; CHECK-NEXT: vmov.8 q6[0], r1 ; CHECK-NEXT: vmov r6, r1, d2 ; CHECK-NEXT: vmov.8 q6[1], r3 ; CHECK-NEXT: vmov.8 q6[2], r5 ; CHECK-NEXT: vmov.8 q6[3], r7 ; CHECK-NEXT: ldrb.w r7, [lr] ; CHECK-NEXT: vmov.8 q6[4], r9 ; CHECK-NEXT: vmov.8 q6[5], r7 ; CHECK-NEXT: ldrb r4, [r1] ; CHECK-NEXT: vmov r1, r5, d3 ; CHECK-NEXT: vldrw.u32 q1, [sp, #232] @ 16-byte Reload ; CHECK-NEXT: ldrb.w r12, [r1] ; CHECK-NEXT: vmov r1, r3, d9 ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: vldrw.u32 q4, [sp, #184] @ 16-byte Reload ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: ldrb r3, [r3] ; CHECK-NEXT: vmov.8 q6[6], r1 ; CHECK-NEXT: vmov r1, r7, d0 ; CHECK-NEXT: vmov.8 q6[7], r3 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: ldrb r7, [r7] ; CHECK-NEXT: vmov.8 q7[4], r1 ; CHECK-NEXT: vmov r1, r3, d1 ; CHECK-NEXT: vldrw.u32 q0, [sp, #264] @ 16-byte Reload ; CHECK-NEXT: vmov.8 q7[5], r7 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: ldrb r3, [r3] ; CHECK-NEXT: vmov.8 q7[6], r1 ; CHECK-NEXT: ldrb r1, [r6] ; CHECK-NEXT: vmov r7, r6, d0 ; CHECK-NEXT: vmov.8 q7[7], r3 ; CHECK-NEXT: vmov r3, lr, d1 ; CHECK-NEXT: vldrw.u32 q0, [sp, #280] @ 16-byte Reload ; CHECK-NEXT: vmov.8 q7[8], r1 ; CHECK-NEXT: vadd.i32 q0, q0, r0 ; CHECK-NEXT: vmov.8 q7[9], r4 ; CHECK-NEXT: vmov r4, r1, d0 ; CHECK-NEXT: vmov.8 q7[10], r12 ; CHECK-NEXT: vmov.8 q7[11], r5 ; CHECK-NEXT: ldrb r7, [r7] ; CHECK-NEXT: ldrb r6, [r6] ; CHECK-NEXT: ldrb r3, [r3] ; CHECK-NEXT: ldrb r4, [r4] ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q6[8], r4 ; CHECK-NEXT: vmov r5, r4, d1 ; CHECK-NEXT: vmov.8 q6[9], r1 ; CHECK-NEXT: vadd.i32 q0, q5, r0 ; CHECK-NEXT: vldrw.u32 q5, [sp, #200] @ 16-byte Reload ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: ldrb r4, [r4] ; CHECK-NEXT: vmov.8 q6[10], r5 ; CHECK-NEXT: vmov.8 q6[11], r4 ; CHECK-NEXT: vmov.8 q6[12], r7 ; CHECK-NEXT: vmov.8 q6[13], r6 ; CHECK-NEXT: vmov.8 q6[14], r3 ; CHECK-NEXT: vmov r1, r3, d0 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[12], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[13], r1 ; CHECK-NEXT: vmov r1, r3, d1 ; CHECK-NEXT: vadd.i32 q0, q1, r0 ; CHECK-NEXT: vadd.i32 q1, q1, q2 ; CHECK-NEXT: vstrw.32 q1, [sp, #232] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q1, [sp, #248] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q1, q1, q2 ; CHECK-NEXT: vstrw.32 q1, [sp, #248] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q1, [sp, #152] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q1, q1, q2 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[14], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[15], r1 ; CHECK-NEXT: ldrb.w r1, [lr] ; CHECK-NEXT: vmov.8 q6[15], r1 ; CHECK-NEXT: vmov r1, r3, d0 ; CHECK-NEXT: vadd.i8 q6, q6, q7 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: ldrb r3, [r3] ; CHECK-NEXT: vmov.8 q7[0], r1 ; CHECK-NEXT: vmov.8 q7[1], r3 ; CHECK-NEXT: vmov r1, r3, d1 ; CHECK-NEXT: vadd.i32 q0, q3, r0 ; CHECK-NEXT: vadd.i32 q3, q3, q2 ; CHECK-NEXT: vstrw.32 q3, [sp, #216] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [sp, #296] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q3, q3, q2 ; CHECK-NEXT: vstrw.32 q3, [sp, #296] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [sp, #280] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q3, q3, q2 ; CHECK-NEXT: vstrw.32 q3, [sp, #280] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q3, [sp, #264] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q3, q3, q2 ; CHECK-NEXT: vstrw.32 q3, [sp, #264] @ 16-byte Spill ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[2], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[3], r1 ; CHECK-NEXT: vmov r1, r3, d0 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[4], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[5], r1 ; CHECK-NEXT: vmov r1, r3, d1 ; CHECK-NEXT: vadd.i32 q0, q5, r0 ; CHECK-NEXT: vadd.i32 q5, q5, q2 ; CHECK-NEXT: vstrw.32 q5, [sp, #200] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q5, [sp, #120] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q5, q5, q2 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[6], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[7], r1 ; CHECK-NEXT: vmov r1, r3, d0 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[8], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[9], r1 ; CHECK-NEXT: vmov r1, r3, d1 ; CHECK-NEXT: vadd.i32 q0, q4, r0 ; CHECK-NEXT: vadd.i32 q4, q4, q2 ; CHECK-NEXT: vstrw.32 q4, [sp, #184] @ 16-byte Spill ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[10], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[11], r1 ; CHECK-NEXT: vmov r1, r3, d0 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[12], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[13], r1 ; CHECK-NEXT: vmov r1, r3, d1 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: vmov.8 q7[14], r1 ; CHECK-NEXT: ldrb r1, [r3] ; CHECK-NEXT: vmov.8 q7[15], r1 ; CHECK-NEXT: vadd.i8 q0, q6, q7 ; CHECK-NEXT: vldrw.u32 q7, [sp, #136] @ 16-byte Reload ; CHECK-NEXT: vstrb.8 q0, [r8], #16 ; CHECK-NEXT: vldrw.u32 q0, [sp, #168] @ 16-byte Reload ; CHECK-NEXT: vadd.i32 q7, q7, q2 ; CHECK-NEXT: vadd.i32 q0, q0, q2 ; CHECK-NEXT: bne.w .LBB13_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB13_2 Depth=1 ; CHECK-NEXT: cmp r10, r2 ; CHECK-NEXT: bne.w .LBB13_2 ; CHECK-NEXT: .LBB13_5: @ %for.cond.cleanup ; CHECK-NEXT: add sp, #312 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI13_0: ; CHECK-NEXT: .long 38 @ 0x26 ; CHECK-NEXT: .long 41 @ 0x29 ; CHECK-NEXT: .long 44 @ 0x2c ; CHECK-NEXT: .long 47 @ 0x2f ; CHECK-NEXT: .LCPI13_1: ; CHECK-NEXT: .long 14 @ 0xe ; CHECK-NEXT: .long 17 @ 0x11 ; CHECK-NEXT: .long 20 @ 0x14 ; CHECK-NEXT: .long 23 @ 0x17 ; CHECK-NEXT: .LCPI13_2: ; CHECK-NEXT: .long 24 @ 0x18 ; CHECK-NEXT: .long 27 @ 0x1b ; CHECK-NEXT: .long 30 @ 0x1e ; CHECK-NEXT: .long 33 @ 0x21 ; CHECK-NEXT: .LCPI13_3: ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 4 @ 0x4 ; CHECK-NEXT: .long 7 @ 0x7 ; CHECK-NEXT: .long 10 @ 0xa ; CHECK-NEXT: .LCPI13_4: ; CHECK-NEXT: .long 36 @ 0x24 ; CHECK-NEXT: .long 39 @ 0x27 ; CHECK-NEXT: .long 42 @ 0x2a ; CHECK-NEXT: .long 45 @ 0x2d ; CHECK-NEXT: .LCPI13_5: ; CHECK-NEXT: .long 25 @ 0x19 ; CHECK-NEXT: .long 28 @ 0x1c ; CHECK-NEXT: .long 31 @ 0x1f ; CHECK-NEXT: .long 34 @ 0x22 ; CHECK-NEXT: .LCPI13_6: ; CHECK-NEXT: .long 13 @ 0xd ; CHECK-NEXT: .long 16 @ 0x10 ; CHECK-NEXT: .long 19 @ 0x13 ; CHECK-NEXT: .long 22 @ 0x16 ; CHECK-NEXT: .LCPI13_7: ; CHECK-NEXT: .long 2 @ 0x2 ; CHECK-NEXT: .long 5 @ 0x5 ; CHECK-NEXT: .long 8 @ 0x8 ; CHECK-NEXT: .long 11 @ 0xb ; CHECK-NEXT: .LCPI13_8: ; CHECK-NEXT: .long 26 @ 0x1a ; CHECK-NEXT: .long 29 @ 0x1d ; CHECK-NEXT: .long 32 @ 0x20 ; CHECK-NEXT: .long 35 @ 0x23 ; CHECK-NEXT: .LCPI13_9: ; CHECK-NEXT: .long 37 @ 0x25 ; CHECK-NEXT: .long 40 @ 0x28 ; CHECK-NEXT: .long 43 @ 0x2b ; CHECK-NEXT: .long 46 @ 0x2e ; CHECK-NEXT: .LCPI13_10: ; CHECK-NEXT: .long 12 @ 0xc ; CHECK-NEXT: .long 15 @ 0xf ; CHECK-NEXT: .long 18 @ 0x12 ; CHECK-NEXT: .long 21 @ 0x15 ; CHECK-NEXT: .LCPI13_11: ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 3 @ 0x3 ; CHECK-NEXT: .long 6 @ 0x6 ; CHECK-NEXT: .long 9 @ 0x9 entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -8 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <16 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = mul nuw nsw <16 x i32> %vec.ind, %1 = getelementptr inbounds i8, ptr %data, <16 x i32> %0 %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %1, i32 2, <16 x i1> , <16 x i8> undef) %2 = add nuw nsw <16 x i32> %0, %3 = getelementptr inbounds i8, ptr %data, <16 x i32> %2 %wide.masked.gather24 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %3, i32 2, <16 x i1> , <16 x i8> undef) %4 = add nuw nsw <16 x i32> %0, %5 = getelementptr inbounds i8, ptr %data, <16 x i32> %4 %wide.masked.gather25 = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %5, i32 2, <16 x i1> , <16 x i8> undef) %6 = add nsw <16 x i8> %wide.masked.gather24, %wide.masked.gather %7 = add nsw <16 x i8> %6, %wide.masked.gather25 %8 = getelementptr inbounds i8, ptr %dst, i32 %index store <16 x i8> %7, ptr %8, align 2 %index.next = add i32 %index, 16 %vec.ind.next = add <16 x i32> %vec.ind, %9 = icmp eq i32 %index.next, %n.vec br i1 %9, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } define arm_aapcs_vfpcc void @gather_inc_v16i8_simple(ptr noalias nocapture readonly %data, ptr noalias nocapture %dst, i32 %n) { ; CHECK-LABEL: gather_inc_v16i8_simple: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: push.w {r4, r5, r6, r7, r8, r9, r10, r11, lr} ; CHECK-NEXT: .pad #4 ; CHECK-NEXT: sub sp, #4 ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: .pad #64 ; CHECK-NEXT: sub sp, #64 ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: str r1, [sp, #56] @ 4-byte Spill ; CHECK-NEXT: mov r1, r2 ; CHECK-NEXT: str r2, [sp, #60] @ 4-byte Spill ; CHECK-NEXT: blt.w .LBB14_5 ; CHECK-NEXT: @ %bb.1: @ %vector.ph.preheader ; CHECK-NEXT: adr r5, .LCPI14_3 ; CHECK-NEXT: adr r7, .LCPI14_1 ; CHECK-NEXT: vldrw.u32 q0, [r5] ; CHECK-NEXT: ldr r1, [sp, #60] @ 4-byte Reload ; CHECK-NEXT: adr r3, .LCPI14_0 ; CHECK-NEXT: adr r6, .LCPI14_2 ; CHECK-NEXT: vstrw.32 q0, [sp, #32] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r7] ; CHECK-NEXT: bic r9, r1, #7 ; CHECK-NEXT: vldrw.u32 q3, [r3] ; CHECK-NEXT: vstrw.32 q0, [sp, #16] @ 16-byte Spill ; CHECK-NEXT: vldrw.u32 q0, [r6] ; CHECK-NEXT: mov.w lr, #16 ; CHECK-NEXT: str.w r9, [sp, #52] @ 4-byte Spill ; CHECK-NEXT: vstrw.32 q0, [sp] @ 16-byte Spill ; CHECK-NEXT: .LBB14_2: @ %vector.ph ; CHECK-NEXT: @ =>This Loop Header: Depth=1 ; CHECK-NEXT: @ Child Loop BB14_3 Depth 2 ; CHECK-NEXT: ldr.w r8, [sp, #56] @ 4-byte Reload ; CHECK-NEXT: vldrw.u32 q5, [sp] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q0, [sp, #16] @ 16-byte Reload ; CHECK-NEXT: vldrw.u32 q7, [sp, #32] @ 16-byte Reload ; CHECK-NEXT: vmov q4, q3 ; CHECK-NEXT: .LBB14_3: @ %vector.body ; CHECK-NEXT: @ Parent Loop BB14_2 Depth=1 ; CHECK-NEXT: @ => This Inner Loop Header: Depth=2 ; CHECK-NEXT: vadd.i32 q1, q5, r0 ; CHECK-NEXT: vadd.i32 q2, q4, r0 ; CHECK-NEXT: vmov r7, r3, d3 ; CHECK-NEXT: vadd.i32 q6, q0, lr ; CHECK-NEXT: vmov r5, r6, d5 ; CHECK-NEXT: subs.w r9, r9, #16 ; CHECK-NEXT: vmov r4, r10, d2 ; CHECK-NEXT: vadd.i32 q1, q7, lr ; CHECK-NEXT: vadd.i32 q4, q4, lr ; CHECK-NEXT: vadd.i32 q5, q5, lr ; CHECK-NEXT: ldrb.w r11, [r3] ; CHECK-NEXT: ldrb r3, [r7] ; CHECK-NEXT: vmov r7, r12, d4 ; CHECK-NEXT: vadd.i32 q2, q7, r0 ; CHECK-NEXT: vadd.i32 q7, q0, r0 ; CHECK-NEXT: ldrb r5, [r5] ; CHECK-NEXT: ldrb r6, [r6] ; CHECK-NEXT: ldrb r4, [r4] ; CHECK-NEXT: ldrb.w r10, [r10] ; CHECK-NEXT: ldrb r7, [r7] ; CHECK-NEXT: ldrb.w r1, [r12] ; CHECK-NEXT: vmov.8 q0[0], r7 ; CHECK-NEXT: vmov.8 q0[1], r1 ; CHECK-NEXT: vmov r1, r7, d15 ; CHECK-NEXT: vmov.8 q0[2], r5 ; CHECK-NEXT: vmov.8 q0[3], r6 ; CHECK-NEXT: vmov.8 q0[4], r4 ; CHECK-NEXT: vmov r4, r2, d4 ; CHECK-NEXT: vmov.8 q0[5], r10 ; CHECK-NEXT: vmov.8 q0[6], r3 ; CHECK-NEXT: vmov.8 q0[7], r11 ; CHECK-NEXT: ldrb r6, [r7] ; CHECK-NEXT: vmov r5, r7, d5 ; CHECK-NEXT: ldrb r1, [r1] ; CHECK-NEXT: ldrb r2, [r2] ; CHECK-NEXT: ldrb r3, [r5] ; CHECK-NEXT: ldrb.w r12, [r7] ; CHECK-NEXT: ldrb r5, [r4] ; CHECK-NEXT: vmov r4, r7, d14 ; CHECK-NEXT: vmov q7, q1 ; CHECK-NEXT: ldrb r4, [r4] ; CHECK-NEXT: ldrb r7, [r7] ; CHECK-NEXT: vmov.8 q0[8], r4 ; CHECK-NEXT: vmov.8 q0[9], r7 ; CHECK-NEXT: vmov.8 q0[10], r1 ; CHECK-NEXT: vmov.8 q0[11], r6 ; CHECK-NEXT: vmov.8 q0[12], r5 ; CHECK-NEXT: vmov.8 q0[13], r2 ; CHECK-NEXT: vmov.8 q0[14], r3 ; CHECK-NEXT: vmov.8 q0[15], r12 ; CHECK-NEXT: vstrb.8 q0, [r8], #16 ; CHECK-NEXT: vmov q0, q6 ; CHECK-NEXT: bne .LBB14_3 ; CHECK-NEXT: @ %bb.4: @ %middle.block ; CHECK-NEXT: @ in Loop: Header=BB14_2 Depth=1 ; CHECK-NEXT: ldr r1, [sp, #60] @ 4-byte Reload ; CHECK-NEXT: ldr.w r9, [sp, #52] @ 4-byte Reload ; CHECK-NEXT: cmp r9, r1 ; CHECK-NEXT: bne .LBB14_2 ; CHECK-NEXT: .LBB14_5: @ %for.cond.cleanup ; CHECK-NEXT: add sp, #64 ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: add sp, #4 ; CHECK-NEXT: pop.w {r4, r5, r6, r7, r8, r9, r10, r11, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.6: ; CHECK-NEXT: .LCPI14_0: ; CHECK-NEXT: .long 0 @ 0x0 ; CHECK-NEXT: .long 1 @ 0x1 ; CHECK-NEXT: .long 2 @ 0x2 ; CHECK-NEXT: .long 3 @ 0x3 ; CHECK-NEXT: .LCPI14_1: ; CHECK-NEXT: .long 8 @ 0x8 ; CHECK-NEXT: .long 9 @ 0x9 ; CHECK-NEXT: .long 10 @ 0xa ; CHECK-NEXT: .long 11 @ 0xb ; CHECK-NEXT: .LCPI14_2: ; CHECK-NEXT: .long 4 @ 0x4 ; CHECK-NEXT: .long 5 @ 0x5 ; CHECK-NEXT: .long 6 @ 0x6 ; CHECK-NEXT: .long 7 @ 0x7 ; CHECK-NEXT: .LCPI14_3: ; CHECK-NEXT: .long 12 @ 0xc ; CHECK-NEXT: .long 13 @ 0xd ; CHECK-NEXT: .long 14 @ 0xe ; CHECK-NEXT: .long 15 @ 0xf entry: %cmp22 = icmp sgt i32 %n, 0 br i1 %cmp22, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %for.body.preheader %n.vec = and i32 %n, -8 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <16 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %0 = getelementptr inbounds i8, ptr %data, <16 x i32> %vec.ind %wide.masked.gather = call <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr> %0, i32 2, <16 x i1> , <16 x i8> undef) %1 = getelementptr inbounds i8, ptr %dst, i32 %index store <16 x i8> %wide.masked.gather, ptr %1, align 2 %index.next = add i32 %index, 16 %vec.ind.next = add <16 x i32> %vec.ind, %2 = icmp eq i32 %index.next, %n.vec br i1 %2, label %middle.block, label %vector.body middle.block: ; preds = %vector.body %cmp.n = icmp eq i32 %n.vec, %n br i1 %cmp.n, label %for.cond.cleanup, label %vector.ph for.cond.cleanup: ; preds = %for.body, %middle.block, %entry ret void } define void @shl(ptr nocapture %x, ptr noalias nocapture readonly %y, i32 %n) { ; CHECK-LABEL: shl: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r7, lr} ; CHECK-NEXT: push {r7, lr} ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: it lt ; CHECK-NEXT: poplt {r7, pc} ; CHECK-NEXT: .LBB15_1: @ %vector.ph ; CHECK-NEXT: adr r3, .LCPI15_0 ; CHECK-NEXT: vldrw.u32 q0, [r3] ; CHECK-NEXT: vadd.i32 q0, q0, r1 ; CHECK-NEXT: dlstp.32 lr, r2 ; CHECK-NEXT: .LBB15_2: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vldrw.u32 q1, [q0, #64]! ; CHECK-NEXT: vstrw.32 q1, [r0], #16 ; CHECK-NEXT: letp lr, .LBB15_2 ; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup ; CHECK-NEXT: pop {r7, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.4: ; CHECK-NEXT: .LCPI15_0: ; CHECK-NEXT: .long 4294967232 @ 0xffffffc0 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0 ; CHECK-NEXT: .long 4294967264 @ 0xffffffe0 ; CHECK-NEXT: .long 4294967280 @ 0xfffffff0 entry: %cmp6 = icmp sgt i32 %n, 0 br i1 %cmp6, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %entry %n.rnd.up = add i32 %n, 3 %n.vec = and i32 %n.rnd.up, -4 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <4 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n) %0 = shl nsw <4 x i32> %vec.ind, %1 = getelementptr inbounds i32, ptr %y, <4 x i32> %0 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef) %2 = getelementptr inbounds i32, ptr %x, i32 %index call void @llvm.masked.store.v4i32.p0(<4 x i32> %wide.masked.gather, ptr %2, i32 4, <4 x i1> %active.lane.mask) %index.next = add i32 %index, 4 %vec.ind.next = add <4 x i32> %vec.ind, %3 = icmp eq i32 %index.next, %n.vec br i1 %3, label %for.cond.cleanup, label %vector.body for.cond.cleanup: ; preds = %vector.body, %entry ret void } define void @shlor(ptr nocapture %x, ptr noalias nocapture readonly %y, i32 %n) { ; CHECK-LABEL: shlor: ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r4, r5, r6, lr} ; CHECK-NEXT: push {r4, r5, r6, lr} ; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} ; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} ; CHECK-NEXT: cmp r2, #1 ; CHECK-NEXT: blt .LBB16_3 ; CHECK-NEXT: @ %bb.1: @ %vector.ph ; CHECK-NEXT: adr.w lr, .LCPI16_0 ; CHECK-NEXT: adr r4, .LCPI16_1 ; CHECK-NEXT: adr r5, .LCPI16_2 ; CHECK-NEXT: adr r6, .LCPI16_3 ; CHECK-NEXT: vldrw.u32 q0, [r6] ; CHECK-NEXT: vldrw.u32 q1, [r5] ; CHECK-NEXT: vldrw.u32 q2, [r4] ; CHECK-NEXT: vldrw.u32 q3, [lr] ; CHECK-NEXT: vadd.i32 q0, q0, r1 ; CHECK-NEXT: vadd.i32 q1, q1, r1 ; CHECK-NEXT: vadd.i32 q2, q2, r1 ; CHECK-NEXT: vadd.i32 q3, q3, r1 ; CHECK-NEXT: dlstp.32 lr, r2 ; CHECK-NEXT: .LBB16_2: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 ; CHECK-NEXT: vldrw.u32 q4, [q3, #128]! ; CHECK-NEXT: vldrw.u32 q5, [q2, #128]! ; CHECK-NEXT: vldrw.u32 q6, [q0, #128]! ; CHECK-NEXT: vadd.i32 q4, q5, q4 ; CHECK-NEXT: vldrw.u32 q5, [q1, #128]! ; CHECK-NEXT: vadd.i32 q4, q4, q5 ; CHECK-NEXT: vadd.i32 q4, q4, q6 ; CHECK-NEXT: vstrw.32 q4, [r0], #16 ; CHECK-NEXT: letp lr, .LBB16_2 ; CHECK-NEXT: .LBB16_3: @ %for.cond.cleanup ; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} ; CHECK-NEXT: pop {r4, r5, r6, pc} ; CHECK-NEXT: .p2align 4 ; CHECK-NEXT: @ %bb.4: ; CHECK-NEXT: .LCPI16_0: ; CHECK-NEXT: .long 4294967168 @ 0xffffff80 ; CHECK-NEXT: .long 4294967200 @ 0xffffffa0 ; CHECK-NEXT: .long 4294967232 @ 0xffffffc0 ; CHECK-NEXT: .long 4294967264 @ 0xffffffe0 ; CHECK-NEXT: .LCPI16_1: ; CHECK-NEXT: .long 4294967176 @ 0xffffff88 ; CHECK-NEXT: .long 4294967208 @ 0xffffffa8 ; CHECK-NEXT: .long 4294967240 @ 0xffffffc8 ; CHECK-NEXT: .long 4294967272 @ 0xffffffe8 ; CHECK-NEXT: .LCPI16_2: ; CHECK-NEXT: .long 4294967184 @ 0xffffff90 ; CHECK-NEXT: .long 4294967216 @ 0xffffffb0 ; CHECK-NEXT: .long 4294967248 @ 0xffffffd0 ; CHECK-NEXT: .long 4294967280 @ 0xfffffff0 ; CHECK-NEXT: .LCPI16_3: ; CHECK-NEXT: .long 4294967192 @ 0xffffff98 ; CHECK-NEXT: .long 4294967224 @ 0xffffffb8 ; CHECK-NEXT: .long 4294967256 @ 0xffffffd8 ; CHECK-NEXT: .long 4294967288 @ 0xfffffff8 entry: %cmp23 = icmp sgt i32 %n, 0 br i1 %cmp23, label %vector.ph, label %for.cond.cleanup vector.ph: ; preds = %entry %n.rnd.up = add i32 %n, 3 %n.vec = and i32 %n.rnd.up, -4 br label %vector.body vector.body: ; preds = %vector.body, %vector.ph %index = phi i32 [ 0, %vector.ph ], [ %index.next, %vector.body ] %vec.ind = phi <4 x i32> [ , %vector.ph ], [ %vec.ind.next, %vector.body ] %active.lane.mask = call <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32 %index, i32 %n) %0 = shl nsw <4 x i32> %vec.ind, %1 = getelementptr inbounds i32, ptr %y, <4 x i32> %0 %wide.masked.gather = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %1, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef) %2 = or <4 x i32> %0, %3 = getelementptr inbounds i32, ptr %y, <4 x i32> %2 %wide.masked.gather25 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %3, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef) %4 = add nsw <4 x i32> %wide.masked.gather25, %wide.masked.gather %5 = or <4 x i32> %0, %6 = getelementptr inbounds i32, ptr %y, <4 x i32> %5 %wide.masked.gather26 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %6, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef) %7 = add nsw <4 x i32> %4, %wide.masked.gather26 %8 = or <4 x i32> %0, %9 = getelementptr inbounds i32, ptr %y, <4 x i32> %8 %wide.masked.gather27 = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %9, i32 4, <4 x i1> %active.lane.mask, <4 x i32> undef) %10 = add nsw <4 x i32> %7, %wide.masked.gather27 %11 = getelementptr inbounds i32, ptr %x, i32 %index call void @llvm.masked.store.v4i32.p0(<4 x i32> %10, ptr %11, i32 4, <4 x i1> %active.lane.mask) %index.next = add i32 %index, 4 %vec.ind.next = add <4 x i32> %vec.ind, %12 = icmp eq i32 %index.next, %n.vec br i1 %12, label %for.cond.cleanup, label %vector.body for.cond.cleanup: ; preds = %vector.body, %entry ret void } declare <2 x i32> @llvm.masked.gather.v2i32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i32>) declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>) declare <8 x i32> @llvm.masked.gather.v8i32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i32>) declare <16 x i32> @llvm.masked.gather.v16i32.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i32>) declare <2 x float> @llvm.masked.gather.v2f32.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x float>) declare <4 x float> @llvm.masked.gather.v4f32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x float>) declare <8 x float> @llvm.masked.gather.v8f32.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x float>) declare <2 x i16> @llvm.masked.gather.v2i16.v2p0(<2 x ptr>, i32, <2 x i1>, <2 x i16>) declare <4 x i16> @llvm.masked.gather.v4i16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i16>) declare <8 x i16> @llvm.masked.gather.v8i16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i16>) declare <16 x i16> @llvm.masked.gather.v16i16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i16>) declare <4 x half> @llvm.masked.gather.v4f16.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x half>) declare <8 x half> @llvm.masked.gather.v8f16.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x half>) declare <16 x half> @llvm.masked.gather.v16f16.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x half>) declare <4 x i8> @llvm.masked.gather.v4i8.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i8>) declare <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr>, i32, <8 x i1>, <8 x i8>) declare <16 x i8> @llvm.masked.gather.v16i8.v16p0(<16 x ptr>, i32, <16 x i1>, <16 x i8>) declare <32 x i8> @llvm.masked.gather.v32i8.v32p0(<32 x ptr>, i32, <32 x i1>, <32 x i8>) declare void @llvm.masked.store.v4i32.p0(<4 x i32>, ptr, i32, <4 x i1>) declare <4 x i1> @llvm.get.active.lane.mask.v4i1.i32(i32, i32)