; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s

define arm_aapcs_vfpcc i32 @vqdmulh_v16i8(<16 x i8> %s0, <16 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v16i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
; CHECK-NEXT:    vaddv.s8 r0, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <16 x i8> %s0 to <16 x i32>
  %l5 = sext <16 x i8> %s1 to <16 x i32>
  %l6 = mul nsw <16 x i32> %l5, %l2
  %l7 = ashr <16 x i32> %l6, splat (i32 7)
  %l9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %l7, <16 x i32> splat (i32 127))
  %l10 = call i32 @llvm.vector.reduce.add.v16i32(<16 x i32> %l9)
  ret i32 %l10
}

define arm_aapcs_vfpcc <16 x i8> @vqdmulh_v16i8_b(<16 x i8> %s0, <16 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v16i8_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <16 x i8> %s0 to <16 x i32>
  %l5 = sext <16 x i8> %s1 to <16 x i32>
  %l6 = mul nsw <16 x i32> %l5, %l2
  %l7 = ashr <16 x i32> %l6, splat (i32 7)
  %l9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %l7, <16 x i32> splat (i32 127))
  %l10 = trunc <16 x i32> %l9 to <16 x i8>
  ret <16 x i8> %l10
}

define arm_aapcs_vfpcc <8 x i8> @vqdmulh_v8i8_b(<8 x i8> %s0, <8 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v8i8_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <8 x i8> %s0 to <8 x i32>
  %l5 = sext <8 x i8> %s1 to <8 x i32>
  %l6 = mul nsw <8 x i32> %l5, %l2
  %l7 = ashr <8 x i32> %l6, splat (i32 7)
  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> splat (i32 127))
  %l10 = trunc <8 x i32> %l9 to <8 x i8>
  ret <8 x i8> %l10
}

define arm_aapcs_vfpcc <4 x i8> @vqdmulh_v4i8_b(<4 x i8> %s0, <4 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v4i8_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
; CHECK-NEXT:    vmovlb.s8 q0, q0
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <4 x i8> %s0 to <4 x i32>
  %l5 = sext <4 x i8> %s1 to <4 x i32>
  %l6 = mul nsw <4 x i32> %l5, %l2
  %l7 = ashr <4 x i32> %l6, splat (i32 7)
  %l9 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l7, <4 x i32> splat (i32 127))
  %l10 = trunc <4 x i32> %l9 to <4 x i8>
  ret <4 x i8> %l10
}

define arm_aapcs_vfpcc <32 x i8> @vqdmulh_v32i8_b(<32 x i8> %s0, <32 x i8> %s1) {
; CHECK-LABEL: vqdmulh_v32i8_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s8 q0, q2, q0
; CHECK-NEXT:    vqdmulh.s8 q1, q3, q1
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <32 x i8> %s0 to <32 x i32>
  %l5 = sext <32 x i8> %s1 to <32 x i32>
  %l6 = mul nsw <32 x i32> %l5, %l2
  %l7 = ashr <32 x i32> %l6, splat (i32 7)
  %l9 = call <32 x i32> @llvm.smin.v32i32(<32 x i32> %l7, <32 x i32> splat (i32 127))
  %l10 = trunc <32 x i32> %l9 to <32 x i8>
  ret <32 x i8> %l10
}

define arm_aapcs_vfpcc i32 @vqdmulh_v8i16(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
; CHECK-NEXT:    vaddv.s16 r0, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <8 x i16> %s0 to <8 x i32>
  %l5 = sext <8 x i16> %s1 to <8 x i32>
  %l6 = mul nsw <8 x i32> %l5, %l2
  %l7 = ashr <8 x i32> %l6, splat (i32 15)
  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> splat (i32 32767))
  %l10 = call i32 @llvm.vector.reduce.add.v8i32(<8 x i32> %l9)
  ret i32 %l10
}
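
; The _b variants below return the truncated vector instead of a reduction.
; After the ashr of 15, the only value outside the i16 range is 32768 (from
; -32768 * -32768); the smin with 32767 clamps exactly that case, matching
; VQDMULH's saturating multiply-return-high, so the whole
; extend/mul/shift/clamp/trunc chain folds to one instruction.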
define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_b(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <8 x i16> %s0 to <8 x i32>
  %l5 = sext <8 x i16> %s1 to <8 x i32>
  %l6 = mul nsw <8 x i32> %l5, %l2
  %l7 = ashr <8 x i32> %l6, splat (i32 15)
  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> splat (i32 32767))
  %l10 = trunc <8 x i32> %l9 to <8 x i16>
  ret <8 x i16> %l10
}

define arm_aapcs_vfpcc <4 x i16> @vqdmulh_v4i16_b(<4 x i16> %s0, <4 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v4i16_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
; CHECK-NEXT:    vmovlb.s16 q0, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <4 x i16> %s0 to <4 x i32>
  %l5 = sext <4 x i16> %s1 to <4 x i32>
  %l6 = mul nsw <4 x i32> %l5, %l2
  %l7 = ashr <4 x i32> %l6, splat (i32 15)
  %l9 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l7, <4 x i32> splat (i32 32767))
  %l10 = trunc <4 x i32> %l9 to <4 x i16>
  ret <4 x i16> %l10
}

define arm_aapcs_vfpcc <16 x i16> @vqdmulh_v16i16_b(<16 x i16> %s0, <16 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v16i16_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s16 q0, q2, q0
; CHECK-NEXT:    vqdmulh.s16 q1, q3, q1
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <16 x i16> %s0 to <16 x i32>
  %l5 = sext <16 x i16> %s1 to <16 x i32>
  %l6 = mul nsw <16 x i32> %l5, %l2
  %l7 = ashr <16 x i32> %l6, splat (i32 15)
  %l9 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %l7, <16 x i32> splat (i32 32767))
  %l10 = trunc <16 x i32> %l9 to <16 x i16>
  ret <16 x i16> %l10
}

define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_c(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_c:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .pad #16
; CHECK-NEXT:    sub sp, #16
; CHECK-NEXT:    vmov.u16 r0, q0[6]
; CHECK-NEXT:    vmov.u16 r1, q0[4]
; CHECK-NEXT:    vmov q2[2], q2[0], r1, r0
; CHECK-NEXT:    vmov.u16 r0, q0[7]
; CHECK-NEXT:    vmov.u16 r1, q0[5]
; CHECK-NEXT:    vmov.u16 r2, q0[0]
; CHECK-NEXT:    vmov q2[3], q2[1], r1, r0
; CHECK-NEXT:    vmov.u16 r0, q1[6]
; CHECK-NEXT:    vmov.u16 r1, q1[4]
; CHECK-NEXT:    vmov q3[2], q3[0], r1, r0
; CHECK-NEXT:    vmov.u16 r0, q1[7]
; CHECK-NEXT:    vmov.u16 r1, q1[5]
; CHECK-NEXT:    vmov q3[3], q3[1], r1, r0
; CHECK-NEXT:    mov r0, sp
; CHECK-NEXT:    vmullb.s16 q2, q3, q2
; CHECK-NEXT:    vmov.u16 r1, q0[2]
; CHECK-NEXT:    vshl.i32 q2, q2, #10
; CHECK-NEXT:    vshr.s32 q2, q2, #10
; CHECK-NEXT:    vshr.s32 q2, q2, #15
; CHECK-NEXT:    vstrh.32 q2, [r0, #8]
; CHECK-NEXT:    vmov q2[2], q2[0], r2, r1
; CHECK-NEXT:    vmov.u16 r1, q0[3]
; CHECK-NEXT:    vmov.u16 r2, q0[1]
; CHECK-NEXT:    vmov q2[3], q2[1], r2, r1
; CHECK-NEXT:    vmov.u16 r1, q1[2]
; CHECK-NEXT:    vmov.u16 r2, q1[0]
; CHECK-NEXT:    vmov q0[2], q0[0], r2, r1
; CHECK-NEXT:    vmov.u16 r1, q1[3]
; CHECK-NEXT:    vmov.u16 r2, q1[1]
; CHECK-NEXT:    vmov q0[3], q0[1], r2, r1
; CHECK-NEXT:    vmullb.s16 q0, q0, q2
; CHECK-NEXT:    vshl.i32 q0, q0, #10
; CHECK-NEXT:    vshr.s32 q0, q0, #10
; CHECK-NEXT:    vshr.s32 q0, q0, #15
; CHECK-NEXT:    vstrh.32 q0, [r0]
; CHECK-NEXT:    vldrw.u32 q0, [r0]
; CHECK-NEXT:    add sp, #16
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <8 x i16> %s0 to <8 x i22>
  %l5 = sext <8 x i16> %s1 to <8 x i22>
  %l6 = mul nsw <8 x i22> %l5, %l2
  %l7 = ashr <8 x i22> %l6, splat (i22 15)
  %l9 = call <8 x i22> @llvm.smin.v8i22(<8 x i22> %l7, <8 x i22> splat (i22 32767))
  %l10 = trunc <8 x i22> %l9 to <8 x i16>
  ret <8 x i16> %l10
}
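
; The interleaved tests feed the same pattern through lane shuffles. In the
; first, the deinterleave of both inputs is undone by the re-interleave of the
; result, so a lone VQDMULH.S16 suffices; in the second, the even and odd
; lanes are multiplied separately and recombined with VMOVNT.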
define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved(<8 x i16> %s0, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_interleaved:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
; CHECK-NEXT:    bx lr
entry:
  %0 = shufflevector <8 x i16> %s0, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
  %1 = sext <8 x i16> %0 to <8 x i32>
  %l2 = sext <8 x i16> %s0 to <8 x i32>
  %2 = shufflevector <8 x i16> %s1, <8 x i16> undef, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 1, i32 3, i32 5, i32 7>
  %3 = sext <8 x i16> %2 to <8 x i32>
  %l5 = sext <8 x i16> %s1 to <8 x i32>
  %l6 = mul nsw <8 x i32> %3, %1
  %l7 = ashr <8 x i32> %l6, splat (i32 15)
  %l9 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %l7, <8 x i32> splat (i32 32767))
  %l10 = trunc <8 x i32> %l9 to <8 x i16>
  %4 = shufflevector <8 x i16> %l10, <8 x i16> undef, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x i16> %4
}

define arm_aapcs_vfpcc <8 x i16> @vqdmulh_v8i16_interleaved2(<4 x i32> %s0a, <8 x i16> %s1) {
; CHECK-LABEL: vqdmulh_v8i16_interleaved2:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    vqdmulh.s16 q2, q1, q0
; CHECK-NEXT:    vrev32.16 q1, q1
; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
; CHECK-NEXT:    vmovnt.i32 q2, q0
; CHECK-NEXT:    vmov q0, q2
; CHECK-NEXT:    bx lr
  %s0 = trunc <4 x i32> %s0a to <4 x i16>
  %strided.vec = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec44 = shufflevector <8 x i16> %s1, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  %l7 = sext <4 x i16> %strided.vec to <4 x i32>
  %l8 = sext <4 x i16> %s0 to <4 x i32>
  %l9 = mul nsw <4 x i32> %l7, %l8
  %l10 = ashr <4 x i32> %l9, splat (i32 15)
  %l12 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l10, <4 x i32> splat (i32 32767))
  %l13 = trunc <4 x i32> %l12 to <4 x i16>
  %l14 = sext <4 x i16> %strided.vec44 to <4 x i32>
  %l15 = mul nsw <4 x i32> %l14, %l8
  %l16 = ashr <4 x i32> %l15, splat (i32 15)
  %l18 = call <4 x i32> @llvm.smin.v4i32(<4 x i32> %l16, <4 x i32> splat (i32 32767))
  %l19 = trunc <4 x i32> %l18 to <4 x i16>
  %interleaved.vec = shufflevector <4 x i16> %l13, <4 x i16> %l19, <8 x i32> <i32 0, i32 4, i32 1, i32 5, i32 2, i32 6, i32 3, i32 7>
  ret <8 x i16> %interleaved.vec
}

define arm_aapcs_vfpcc i64 @vqdmulh_v4i32(<4 x i32> %s0, <4 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v4i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
; CHECK-NEXT:    vaddlv.s32 r0, r1, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <4 x i32> %s0 to <4 x i64>
  %l5 = sext <4 x i32> %s1 to <4 x i64>
  %l6 = mul nsw <4 x i64> %l5, %l2
  %l7 = ashr <4 x i64> %l6, splat (i64 31)
  %l9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %l7, <4 x i64> splat (i64 2147483647))
  %l10 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> %l9)
  ret i64 %l10
}

define arm_aapcs_vfpcc <4 x i32> @vqdmulh_v4i32_b(<4 x i32> %s0, <4 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v4i32_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <4 x i32> %s0 to <4 x i64>
  %l5 = sext <4 x i32> %s1 to <4 x i64>
  %l6 = mul nsw <4 x i64> %l5, %l2
  %l7 = ashr <4 x i64> %l6, splat (i64 31)
  %l9 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %l7, <4 x i64> splat (i64 2147483647))
  %l10 = trunc <4 x i64> %l9 to <4 x i32>
  ret <4 x i32> %l10
}

define arm_aapcs_vfpcc <2 x i32> @vqdmulh_v2i32_b(<2 x i32> %s0, <2 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v2i32_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
; CHECK-NEXT:    vmov r0, s2
; CHECK-NEXT:    vmov r1, s0
; CHECK-NEXT:    vmov q0[2], q0[0], r1, r0
; CHECK-NEXT:    asrs r0, r0, #31
; CHECK-NEXT:    asrs r1, r1, #31
; CHECK-NEXT:    vmov q0[3], q0[1], r1, r0
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <2 x i32> %s0 to <2 x i64>
  %l5 = sext <2 x i32> %s1 to <2 x i64>
  %l6 = mul nsw <2 x i64> %l5, %l2
  %l7 = ashr <2 x i64> %l6, splat (i64 31)
  %l9 = call <2 x i64> @llvm.smin.v2i64(<2 x i64> %l7, <2 x i64> splat (i64 2147483647))
  %l10 = trunc <2 x i64> %l9 to <2 x i32>
  ret <2 x i32> %l10
}

define arm_aapcs_vfpcc <8 x i32> @vqdmulh_v8i32_b(<8 x i32> %s0, <8 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v8i32_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    vqdmulh.s32 q0, q2, q0
; CHECK-NEXT:    vqdmulh.s32 q1, q3, q1
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <8 x i32> %s0 to <8 x i64>
  %l5 = sext <8 x i32> %s1 to <8 x i64>
  %l6 = mul nsw <8 x i64> %l5, %l2
  %l7 = ashr <8 x i64> %l6, splat (i64 31)
  %l9 = call <8 x i64> @llvm.smin.v8i64(<8 x i64> %l7, <8 x i64> splat (i64 2147483647))
  %l10 = trunc <8 x i64> %l9 to <8 x i32>
  ret <8 x i32> %l10
}
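
; For <16 x i32> the two arguments no longer fit in q0-q3 together: %s0 takes
; all four registers and %s1 is passed on the stack, so the checks expect four
; VLDRW loads (at sp+16 through sp+64, past the d8/d9 spill) feeding the four
; VQDMULH.S32 instructions.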
define arm_aapcs_vfpcc <16 x i32> @vqdmulh_v16i32_b(<16 x i32> %s0, <16 x i32> %s1) {
; CHECK-LABEL: vqdmulh_v16i32_b:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .vsave {d8, d9}
; CHECK-NEXT:    vpush {d8, d9}
; CHECK-NEXT:    add r0, sp, #16
; CHECK-NEXT:    vldrw.u32 q4, [r0]
; CHECK-NEXT:    add r0, sp, #32
; CHECK-NEXT:    vqdmulh.s32 q0, q4, q0
; CHECK-NEXT:    vldrw.u32 q4, [r0]
; CHECK-NEXT:    add r0, sp, #48
; CHECK-NEXT:    vqdmulh.s32 q1, q4, q1
; CHECK-NEXT:    vldrw.u32 q4, [r0]
; CHECK-NEXT:    add r0, sp, #64
; CHECK-NEXT:    vqdmulh.s32 q2, q4, q2
; CHECK-NEXT:    vldrw.u32 q4, [r0]
; CHECK-NEXT:    vqdmulh.s32 q3, q4, q3
; CHECK-NEXT:    vpop {d8, d9}
; CHECK-NEXT:    bx lr
entry:
  %l2 = sext <16 x i32> %s0 to <16 x i64>
  %l5 = sext <16 x i32> %s1 to <16 x i64>
  %l6 = mul nsw <16 x i64> %l5, %l2
  %l7 = ashr <16 x i64> %l6, splat (i64 31)
  %l9 = call <16 x i64> @llvm.smin.v16i64(<16 x i64> %l7, <16 x i64> splat (i64 2147483647))
  %l10 = trunc <16 x i64> %l9 to <16 x i32>
  ret <16 x i32> %l10
}
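
; The loop tests check that the pattern is still recognised when it comes from
; a vectorized loop over 1024 elements: 64 iterations of 16 x i8, 128 of
; 8 x i16 and 256 of 4 x i32, each compiled to a low-overhead LE loop around a
; single VQDMULH.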
define void @vqdmulh_loop_i8(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) local_unnamed_addr {
; CHECK-LABEL: vqdmulh_loop_i8:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    mov.w lr, #64
; CHECK-NEXT:  .LBB17_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrb.u8 q0, [r0], #16
; CHECK-NEXT:    vldrb.u8 q1, [r1], #16
; CHECK-NEXT:    vqdmulh.s8 q0, q1, q0
; CHECK-NEXT:    vstrb.8 q0, [r2], #16
; CHECK-NEXT:    le lr, .LBB17_1
; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i8, ptr %x, i32 %index
  %wide.load = load <16 x i8>, ptr %0, align 1
  %1 = sext <16 x i8> %wide.load to <16 x i32>
  %2 = getelementptr inbounds i8, ptr %y, i32 %index
  %wide.load26 = load <16 x i8>, ptr %2, align 1
  %3 = sext <16 x i8> %wide.load26 to <16 x i32>
  %4 = mul nsw <16 x i32> %3, %1
  %5 = ashr <16 x i32> %4, splat (i32 7)
  %6 = icmp slt <16 x i32> %5, splat (i32 127)
  %7 = call <16 x i32> @llvm.smin.v16i32(<16 x i32> %5, <16 x i32> splat (i32 127))
  %8 = trunc <16 x i32> %7 to <16 x i8>
  %9 = getelementptr inbounds i8, ptr %z, i32 %index
  store <16 x i8> %8, ptr %9, align 1
  %index.next = add i32 %index, 16
  %10 = icmp eq i32 %index.next, 1024
  br i1 %10, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @vqdmulh_loop_i16(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
; CHECK-LABEL: vqdmulh_loop_i16:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    mov.w lr, #128
; CHECK-NEXT:  .LBB18_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrh.u16 q0, [r0], #16
; CHECK-NEXT:    vldrh.u16 q1, [r1], #16
; CHECK-NEXT:    vqdmulh.s16 q0, q1, q0
; CHECK-NEXT:    vstrb.8 q0, [r2], #16
; CHECK-NEXT:    le lr, .LBB18_1
; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i16, ptr %x, i32 %index
  %wide.load = load <8 x i16>, ptr %0, align 2
  %1 = sext <8 x i16> %wide.load to <8 x i32>
  %2 = getelementptr inbounds i16, ptr %y, i32 %index
  %wide.load30 = load <8 x i16>, ptr %2, align 2
  %3 = sext <8 x i16> %wide.load30 to <8 x i32>
  %4 = mul nsw <8 x i32> %3, %1
  %5 = ashr <8 x i32> %4, splat (i32 15)
  %6 = icmp slt <8 x i32> %5, splat (i32 32767)
  %7 = call <8 x i32> @llvm.smin.v8i32(<8 x i32> %5, <8 x i32> splat (i32 32767))
  %8 = trunc <8 x i32> %7 to <8 x i16>
  %9 = getelementptr inbounds i16, ptr %z, i32 %index
  store <8 x i16> %8, ptr %9, align 2
  %index.next = add i32 %index, 8
  %10 = icmp eq i32 %index.next, 1024
  br i1 %10, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define void @vqdmulh_loop_i32(ptr nocapture readonly %x, ptr nocapture readonly %y, ptr noalias nocapture %z, i32 %n) {
; CHECK-LABEL: vqdmulh_loop_i32:
; CHECK:       @ %bb.0: @ %entry
; CHECK-NEXT:    .save {r7, lr}
; CHECK-NEXT:    push {r7, lr}
; CHECK-NEXT:    mov.w lr, #256
; CHECK-NEXT:  .LBB19_1: @ %vector.body
; CHECK-NEXT:    @ =>This Inner Loop Header: Depth=1
; CHECK-NEXT:    vldrw.u32 q0, [r0], #16
; CHECK-NEXT:    vldrw.u32 q1, [r1], #16
; CHECK-NEXT:    vqdmulh.s32 q0, q1, q0
; CHECK-NEXT:    vstrb.8 q0, [r2], #16
; CHECK-NEXT:    le lr, .LBB19_1
; CHECK-NEXT:  @ %bb.2: @ %for.cond.cleanup
; CHECK-NEXT:    pop {r7, pc}
entry:
  br label %vector.body

vector.body:                                      ; preds = %vector.body, %entry
  %index = phi i32 [ 0, %entry ], [ %index.next, %vector.body ]
  %0 = getelementptr inbounds i32, ptr %x, i32 %index
  %wide.load = load <4 x i32>, ptr %0, align 4
  %1 = sext <4 x i32> %wide.load to <4 x i64>
  %2 = getelementptr inbounds i32, ptr %y, i32 %index
  %wide.load30 = load <4 x i32>, ptr %2, align 4
  %3 = sext <4 x i32> %wide.load30 to <4 x i64>
  %4 = mul nsw <4 x i64> %3, %1
  %5 = ashr <4 x i64> %4, splat (i64 31)
  %6 = icmp slt <4 x i64> %5, splat (i64 2147483647)
  %7 = call <4 x i64> @llvm.smin.v4i64(<4 x i64> %5, <4 x i64> splat (i64 2147483647))
  %8 = trunc <4 x i64> %7 to <4 x i32>
  %9 = getelementptr inbounds i32, ptr %z, i32 %index
  store <4 x i32> %8, ptr %9, align 4
  %index.next = add i32 %index, 4
  %10 = icmp eq i32 %index.next, 1024
  br i1 %10, label %for.cond.cleanup, label %vector.body

for.cond.cleanup:                                 ; preds = %vector.body
  ret void
}

define i32 @scalar(i16 %a) {
; CHECK-LABEL: scalar:
; CHECK:       @ %bb.0:
; CHECK-NEXT:    smulbb r1, r0, r0
; CHECK-NEXT:    movs r0, #127
; CHECK-NEXT:    lsrs r2, r1, #7
; CHECK-NEXT:    cmp r2, #127
; CHECK-NEXT:    it lt
; CHECK-NEXT:    lsrlt r0, r1, #7
; CHECK-NEXT:    bx lr
  %e = sext i16 %a to i32
  %d = mul nsw i32 %e, %e
  %b = ashr i32 %d, 7
  %c = call i32 @llvm.smin.i32(i32 %b, i32 127)
  ret i32 %c
}

declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
declare i32 @llvm.vector.reduce.add.v8i32(<8 x i32>)
declare i32 @llvm.vector.reduce.add.v16i32(<16 x i32>)
declare i32 @llvm.smin.i32(i32 %a, i32 %b)
declare <2 x i64> @llvm.smin.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i64> @llvm.smin.v4i64(<4 x i64>, <4 x i64>)
declare <8 x i64> @llvm.smin.v8i64(<8 x i64>, <8 x i64>)
declare <16 x i64> @llvm.smin.v16i64(<16 x i64>, <16 x i64>)
declare <4 x i32> @llvm.smin.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i32> @llvm.smin.v8i32(<8 x i32>, <8 x i32>)
declare <16 x i32> @llvm.smin.v16i32(<16 x i32>, <16 x i32>)
declare <32 x i32> @llvm.smin.v32i32(<32 x i32>, <32 x i32>)
declare <8 x i22> @llvm.smin.v8i22(<8 x i22>, <8 x i22>)