; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by LoopVectorizer for interleaved stores.
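; For context, a minimal sketch (not part of the autogenerated checks below;
; the struct and function names are hypothetical): a scalar loop of the
; following shape is the kind of C source whose interleaved store group the
; LoopVectorizer lowers into the wide shufflevector-plus-store IR exercised
; by the test functions in this file.
;
;   struct Px5 { char a, b, c, d, e; };  /* five interleaved i8 fields */
;
;   void store5(struct Px5 *out, const char *a, const char *b,
;               const char *c, const char *d, const char *e, int n) {
;     for (int i = 0; i < n; ++i) {      /* vectorized with VF = 2/4/8/16...  */
;       out[i].a = a[i];                 /* ...giving a stride-5 interleaved  */
;       out[i].b = b[i];                 /* store of <5*VF x i8>, built from  */
;       out[i].c = c[i];                 /* shufflevector patterns like those */
;       out[i].d = d[i];                 /* in store_i8_stride5_vf* below     */
;       out[i].e = e[i];
;     }
;   }
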
define void @store_i8_stride5_vf2(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind { ; SSE-LABEL: store_i8_stride5_vf2: ; SSE: # %bb.0: ; SSE-NEXT: movdqa (%rdi), %xmm0 ; SSE-NEXT: movdqa (%rdx), %xmm1 ; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] ; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3] ; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; SSE-NEXT: pxor %xmm1, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,1,3,4,5,6,7] ; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,4] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,7,5] ; SSE-NEXT: packuswb %xmm0, %xmm1 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,255,255] ; SSE-NEXT: pand %xmm0, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = mem[0,0,0,0] ; SSE-NEXT: pandn %xmm2, %xmm0 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: movq %xmm0, (%r9) ; SSE-NEXT: pextrw $4, %xmm0, %eax ; SSE-NEXT: movw %ax, 8(%r9) ; SSE-NEXT: retq ; ; AVX-LABEL: store_i8_stride5_vf2: ; AVX: # %bb.0: ; AVX-NEXT: vmovdqa (%rdi), %xmm0 ; AVX-NEXT: vmovdqa (%rdx), %xmm1 ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3] ; AVX-NEXT: vpunpcklwd {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3] ; AVX-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1] ; AVX-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] ; AVX-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,1,2,6,10,14,3,u,u,u,u,u,u] ; AVX-NEXT: vpextrw $4, %xmm0, 8(%r9) ; AVX-NEXT: vmovq %xmm0, (%r9) ; AVX-NEXT: retq %in.vec0 = load <2 x i8>, ptr %in.vecptr0, align 64 %in.vec1 = load <2 x i8>, ptr %in.vecptr1, align 64 %in.vec2 = load <2 x i8>, ptr %in.vecptr2, align 64 %in.vec3 = load <2 x i8>, ptr %in.vecptr3, align 64 %in.vec4 = load <2 x i8>, ptr %in.vecptr4, align 64 %1 = shufflevector <2 x i8> %in.vec0, <2 x i8> %in.vec1, <4 x i32> %2 = shufflevector <2 x i8> %in.vec2, <2 x i8> %in.vec3, <4 x i32> %3 = shufflevector <4 x i8> %1, <4 x i8> %2, <8 x i32> %4 = shufflevector <2 x i8> %in.vec4, <2 x i8> poison, <8 x i32> %5 = shufflevector <8 x i8> %3, <8 x i8> %4, <10 x i32> %interleaved.vec = shufflevector <10 x i8> %5, <10 x i8> poison, <10 x i32> store <10 x i8> %interleaved.vec, ptr %out.vec, align 64 ret void } define void @store_i8_stride5_vf4(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind { ; SSE-LABEL: store_i8_stride5_vf4: ; SSE: # %bb.0: ; SSE-NEXT: movdqa (%rdi), %xmm1 ; SSE-NEXT: movdqa (%rdx), %xmm2 ; SSE-NEXT: movdqa (%r8), %xmm0 ; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],mem[0],xmm2[1],mem[1] ; SSE-NEXT: pxor %xmm3, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; 
SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm1[3,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,2,0,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,3,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,1,1,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,2,3] ; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[0,2,0,0] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,1,0,2,4,5,6,7] ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,0,0,65535,65535,65535,0] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,2,2,0] ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,2,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,5,7] ; SSE-NEXT: pand %xmm5, %xmm6 ; SSE-NEXT: pandn %xmm4, %xmm5 ; SSE-NEXT: por %xmm6, %xmm5 ; SSE-NEXT: packuswb %xmm3, %xmm5 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; SSE-NEXT: pand %xmm3, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,0,0,0] ; SSE-NEXT: pandn %xmm4, %xmm3 ; SSE-NEXT: por %xmm5, %xmm3 ; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[3,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,1,3,4,5,6,7] ; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2 ; SSE-NEXT: por %xmm1, %xmm2 ; SSE-NEXT: packuswb %xmm2, %xmm2 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255] ; SSE-NEXT: pand %xmm1, %xmm2 ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: por %xmm2, %xmm1 ; SSE-NEXT: movd %xmm1, 16(%r9) ; SSE-NEXT: movdqa %xmm3, (%r9) ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: store_i8_stride5_vf4: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm0 ; AVX1-ONLY-NEXT: vmovdqa (%rdx), %xmm1 ; AVX1-ONLY-NEXT: vmovdqa (%r8), %xmm2 ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; AVX1-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,4,8,12],zero,xmm0[1,5,9,13],zero,xmm0[2,6,10,14],zero,xmm0[3] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,zero,xmm2[0],zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,xmm2[2],zero ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vpalignr {{.*#+}} xmm0 = xmm0[7,8,9,10,11,12,13,14,15],xmm2[0,1,2,3,4,5,6] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,4,8,12,u,u,u,u,u,u,u,u,u,u,u,u] ; AVX1-ONLY-NEXT: vmovd %xmm0, 16(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%r9) ; AVX1-ONLY-NEXT: retq ; ; AVX2-ONLY-LABEL: store_i8_stride5_vf4: ; AVX2-ONLY: # %bb.0: ; AVX2-ONLY-NEXT: vmovdqa (%rdi), %xmm0 ; AVX2-ONLY-NEXT: vmovdqa (%rdx), %xmm1 ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; AVX2-ONLY-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; AVX2-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX2-ONLY-NEXT: vinserti128 $1, (%r8), %ymm0, %ymm0 ; AVX2-ONLY-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,4,8,12],zero,ymm0[1,5,9,13],zero,ymm0[2,6,10,14],zero,ymm0[3],zero,zero,zero,ymm0[19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX2-ONLY-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1] ; AVX2-ONLY-NEXT: 
vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,ymm0[1],zero,zero,zero,zero,ymm0[2],zero,ymm0[23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX2-ONLY-NEXT: vpor %ymm0, %ymm1, %ymm0 ; AVX2-ONLY-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX2-ONLY-NEXT: vmovd %xmm1, 16(%r9) ; AVX2-ONLY-NEXT: vmovdqa %xmm0, (%r9) ; AVX2-ONLY-NEXT: vzeroupper ; AVX2-ONLY-NEXT: retq ; ; AVX512F-LABEL: store_i8_stride5_vf4: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512F-NEXT: vmovdqa (%rdx), %xmm1 ; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; AVX512F-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; AVX512F-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX512F-NEXT: vinserti128 $1, (%r8), %ymm0, %ymm0 ; AVX512F-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,4,8,12],zero,ymm0[1,5,9,13],zero,ymm0[2,6,10,14],zero,ymm0[3],zero,zero,zero,ymm0[19,u,u,u,u,u,u,u,u,u,u,u,u] ; AVX512F-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1] ; AVX512F-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,ymm0[1],zero,zero,zero,zero,ymm0[2],zero,ymm0[23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX512F-NEXT: vpor %ymm0, %ymm1, %ymm0 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512F-NEXT: vmovd %xmm1, 16(%r9) ; AVX512F-NEXT: vmovdqa %xmm0, (%r9) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: store_i8_stride5_vf4: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512BW-NEXT: vmovdqa (%rdx), %xmm1 ; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1] ; AVX512BW-NEXT: vpunpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1] ; AVX512BW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0] ; AVX512BW-NEXT: vinserti128 $1, (%r8), %ymm0, %ymm0 ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm1 = ymm0[0,4,8,12],zero,ymm0[1,5,9,13],zero,ymm0[2,6,10,14],zero,ymm0[3],zero,zero,zero,ymm0[19],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX512BW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,0,1] ; AVX512BW-NEXT: vpshufb {{.*#+}} ymm0 = zero,zero,zero,zero,ymm0[0],zero,zero,zero,zero,ymm0[1],zero,zero,zero,zero,ymm0[2],zero,ymm0[23,27,31],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero ; AVX512BW-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1 ; AVX512BW-NEXT: vmovd %xmm1, 16(%r9) ; AVX512BW-NEXT: vmovdqa %xmm0, (%r9) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %in.vec0 = load <4 x i8>, ptr %in.vecptr0, align 64 %in.vec1 = load <4 x i8>, ptr %in.vecptr1, align 64 %in.vec2 = load <4 x i8>, ptr %in.vecptr2, align 64 %in.vec3 = load <4 x i8>, ptr %in.vecptr3, align 64 %in.vec4 = load <4 x i8>, ptr %in.vecptr4, align 64 %1 = shufflevector <4 x i8> %in.vec0, <4 x i8> %in.vec1, <8 x i32> %2 = shufflevector <4 x i8> %in.vec2, <4 x i8> %in.vec3, <8 x i32> %3 = shufflevector <8 x i8> %1, <8 x i8> %2, <16 x i32> %4 = shufflevector <4 x i8> %in.vec4, <4 x i8> poison, <16 x i32> %5 = shufflevector <16 x i8> %3, <16 x i8> %4, <20 x i32> %interleaved.vec = shufflevector <20 x i8> %5, <20 x i8> poison, <20 x i32> store <20 x i8> %interleaved.vec, ptr %out.vec, align 64 ret void } define void @store_i8_stride5_vf8(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind { ; SSE-LABEL: store_i8_stride5_vf8: ; SSE: # %bb.0: ; SSE-NEXT: movq {{.*#+}} xmm1 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm2 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm4 = 
mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm3 = mem[0],zero ; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[1,1,1,1] ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [65535,65535,0,65535,0,65535,65535,0] ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm3[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,5,5,5] ; SSE-NEXT: pand %xmm8, %xmm6 ; SSE-NEXT: pandn %xmm5, %xmm8 ; SSE-NEXT: por %xmm6, %xmm8 ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,0,255,0,255,255,0,255,0,255,255,0,255,0,255] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm4[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; SSE-NEXT: movdqa %xmm6, %xmm10 ; SSE-NEXT: pandn %xmm5, %xmm10 ; SSE-NEXT: movdqa %xmm2, %xmm7 ; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm2[0],xmm7[1],xmm2[1],xmm7[2],xmm2[2],xmm7[3],xmm2[3],xmm7[4],xmm2[4],xmm7[5],xmm2[5],xmm7[6],xmm2[6],xmm7[7],xmm2[7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm6, %xmm5 ; SSE-NEXT: por %xmm10, %xmm5 ; SSE-NEXT: pand %xmm9, %xmm5 ; SSE-NEXT: pandn %xmm8, %xmm9 ; SSE-NEXT: por %xmm5, %xmm9 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; SSE-NEXT: pand %xmm8, %xmm9 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,0,1,1] ; SSE-NEXT: movdqa %xmm8, %xmm5 ; SSE-NEXT: pandn %xmm10, %xmm5 ; SSE-NEXT: por %xmm9, %xmm5 ; SSE-NEXT: pshuflw {{.*#+}} xmm9 = xmm4[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm9[0,0,0,0] ; SSE-NEXT: pand %xmm8, %xmm9 ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm3[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,1,0,1] ; SSE-NEXT: pandn %xmm10, %xmm8 ; SSE-NEXT: por %xmm9, %xmm8 ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,0,0,0] ; SSE-NEXT: pand %xmm6, %xmm10 ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,0,1,3] ; SSE-NEXT: pandn %xmm7, %xmm6 ; SSE-NEXT: por %xmm10, %xmm6 ; SSE-NEXT: pand %xmm9, %xmm6 ; SSE-NEXT: pandn %xmm8, %xmm9 ; SSE-NEXT: por %xmm6, %xmm9 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; SSE-NEXT: pand %xmm6, %xmm9 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,0,0,0] ; SSE-NEXT: pandn %xmm7, %xmm6 ; SSE-NEXT: por %xmm9, %xmm6 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7] ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,0,255,255,255,255,0,255,255,255,255,255,255,255,255,255] ; SSE-NEXT: pand %xmm7, %xmm4 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,3,3] ; SSE-NEXT: pandn %xmm3, %xmm7 ; SSE-NEXT: por %xmm4, %xmm7 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,0,0,255,255,255,255,255,255,255,255,255,255,255] ; SSE-NEXT: pand %xmm3, %xmm7 ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm2[0,1,2,3,7,7,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2] ; SSE-NEXT: pandn %xmm1, %xmm3 ; SSE-NEXT: por %xmm7, %xmm3 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,0,255,255,255,255,0,255,255,255,255,255,255,255,255] ; SSE-NEXT: pand %xmm1, %xmm3 ; SSE-NEXT: 
pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1] ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: por %xmm3, %xmm1 ; SSE-NEXT: movq %xmm1, 32(%r9) ; SSE-NEXT: movdqa %xmm6, (%r9) ; SSE-NEXT: movdqa %xmm5, 16(%r9) ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: store_i8_stride5_vf8: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX1-ONLY-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX1-ONLY-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,2,13,6,8,10,12,15,u,u,u,u,u,u,u,u] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = zero,zero,xmm1[0,8,u],zero,zero,xmm1[1,9,u],zero,zero,xmm1[2,10,u],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm0[0,8],zero,zero,xmm0[u,1,9],zero,zero,xmm0[u,2,10],zero,zero,xmm0[u,3] ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm5, %xmm4 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3],zero,xmm4[5,6,7,8],zero,xmm4[10,11,12,13],zero,xmm4[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,zero,zero,zero,xmm2[0],zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,xmm2[2],zero ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[3,11,u],zero,zero,xmm1[4,12,u],zero,zero,xmm1[5,13,u],zero,zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[11],zero,zero,xmm0[u,4,12],zero,zero,xmm0[u,5,13],zero,zero,xmm0[u,6,14] ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,1,2],zero,xmm0[4,5,6,7],zero,xmm0[9,10,11,12],zero,xmm0[14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = zero,zero,zero,xmm2[3],zero,zero,zero,zero,xmm2[4],zero,zero,zero,zero,xmm2[5],zero,zero ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 16(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm4, (%r9) ; AVX1-ONLY-NEXT: vmovq %xmm3, 32(%r9) ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: store_i8_stride5_vf8: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: movq (%r8), %rax ; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-SLOW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX2-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX2-SLOW-NEXT: vmovq %rax, %xmm3 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,zero,ymm2[3],zero,ymm2[19,27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,ymm2[27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero,ymm2[22,30] ; AVX2-SLOW-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq 
{{.*#+}} ymm3 = ymm3[0,0,0,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX2-SLOW-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX2-SLOW-NEXT: shrq $48, %rax ; AVX2-SLOW-NEXT: vmovd %eax, %xmm1 ; AVX2-SLOW-NEXT: vpbroadcastw %xmm1, %xmm1 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} xmm3 = [255,255,0,255,255,255,255,0,255,255,0,255,255,255,255,0] ; AVX2-SLOW-NEXT: vpblendvb %xmm3, %xmm0, %xmm1, %xmm0 ; AVX2-SLOW-NEXT: vmovq %xmm0, 32(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm2, (%r9) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: store_i8_stride5_vf8: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: movq (%r8), %rax ; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-FAST-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX2-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX2-FAST-NEXT: vmovq %rax, %xmm3 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,zero,ymm2[3],zero,ymm2[19,27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,ymm2[27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero,ymm2[22,30] ; AVX2-FAST-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1] ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX2-FAST-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX2-FAST-NEXT: shrq $48, %rax ; AVX2-FAST-NEXT: vmovd %eax, %xmm1 ; AVX2-FAST-NEXT: vpbroadcastw %xmm1, %xmm1 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} xmm3 = [255,255,0,255,255,255,255,0,255,255,0,255,255,255,255,0] ; AVX2-FAST-NEXT: vpblendvb %xmm3, %xmm0, %xmm1, %xmm0 ; AVX2-FAST-NEXT: vmovq %xmm0, 32(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm2, (%r9) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: store_i8_stride5_vf8: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: movq (%r8), %rax ; AVX2-FAST-PERLANE-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX2-FAST-PERLANE-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-FAST-PERLANE-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX2-FAST-PERLANE-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX2-FAST-PERLANE-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX2-FAST-PERLANE-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vmovq %rax, %xmm3 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = 
ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,zero,ymm2[3],zero,ymm2[19,27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,ymm2[27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero,ymm2[22,30] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm4, %ymm2, %ymm3, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX2-FAST-PERLANE-NEXT: shrq $48, %rax ; AVX2-FAST-PERLANE-NEXT: vmovd %eax, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastw %xmm1, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} xmm3 = [255,255,0,255,255,255,255,0,255,255,0,255,255,255,255,0] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %xmm3, %xmm0, %xmm1, %xmm0 ; AVX2-FAST-PERLANE-NEXT: vmovq %xmm0, 32(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, (%r9) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512F-SLOW-LABEL: store_i8_stride5_vf8: ; AVX512F-SLOW: # %bb.0: ; AVX512F-SLOW-NEXT: movq (%r8), %rax ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512F-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512F-SLOW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX512F-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX512F-SLOW-NEXT: vmovq %rax, %xmm3 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8],zero,zero,ymm2[u,1,9],zero,zero,ymm2[u,2,10],zero,zero,ymm2[u,3],zero,ymm2[19,27,u],zero,zero,ymm2[20,28,u],zero,zero,ymm2[21,29,u],zero,zero ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8,u],zero,zero,ymm2[1,9,u],zero,zero,ymm2[2,10,u],zero,ymm2[27],zero,zero,ymm2[u,20,28],zero,zero,ymm2[u,21,29],zero,zero,ymm2[u,22,30] ; AVX512F-SLOW-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] ; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX512F-SLOW-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512F-SLOW-NEXT: shrq $48, %rax ; AVX512F-SLOW-NEXT: vmovd %eax, %xmm1 ; AVX512F-SLOW-NEXT: vpbroadcastw %xmm1, %xmm1 ; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; AVX512F-SLOW-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm0 ; AVX512F-SLOW-NEXT: vmovq %xmm1, 32(%r9) ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, (%r9) ; AVX512F-SLOW-NEXT: vzeroupper ; AVX512F-SLOW-NEXT: retq ; ; AVX512F-FAST-LABEL: store_i8_stride5_vf8: ; 
AVX512F-FAST: # %bb.0: ; AVX512F-FAST-NEXT: movq (%r8), %rax ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512F-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512F-FAST-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX512F-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX512F-FAST-NEXT: vmovq %rax, %xmm3 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8],zero,zero,ymm2[u,1,9],zero,zero,ymm2[u,2,10],zero,zero,ymm2[u,3],zero,ymm2[19,27,u],zero,zero,ymm2[20,28,u],zero,zero,ymm2[21,29,u],zero,zero ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8,u],zero,zero,ymm2[1,9,u],zero,zero,ymm2[2,10,u],zero,ymm2[27],zero,zero,ymm2[u,20,28],zero,zero,ymm2[u,21,29],zero,zero,ymm2[u,22,30] ; AVX512F-FAST-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [u,0,0,0,0,u,1,1] ; AVX512F-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3 ; AVX512F-FAST-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm3 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX512F-FAST-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512F-FAST-NEXT: shrq $48, %rax ; AVX512F-FAST-NEXT: vmovd %eax, %xmm1 ; AVX512F-FAST-NEXT: vpbroadcastw %xmm1, %xmm1 ; AVX512F-FAST-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1 ; AVX512F-FAST-NEXT: vinserti32x4 $2, %xmm1, %zmm3, %zmm0 ; AVX512F-FAST-NEXT: vmovq %xmm1, 32(%r9) ; AVX512F-FAST-NEXT: vmovdqa %ymm0, (%r9) ; AVX512F-FAST-NEXT: vzeroupper ; AVX512F-FAST-NEXT: retq ; ; AVX512BW-SLOW-LABEL: store_i8_stride5_vf8: ; AVX512BW-SLOW: # %bb.0: ; AVX512BW-SLOW-NEXT: movq (%r8), %rax ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512BW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512BW-SLOW-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX512BW-SLOW-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX512BW-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX512BW-SLOW-NEXT: vmovq %rax, %xmm3 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,zero,ymm2[3],zero,ymm2[19,27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,ymm2[27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero,ymm2[22,30] ; AVX512BW-SLOW-NEXT: vpor %ymm4, %ymm2, %ymm2 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[0,0,1,1] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,0,1] ; AVX512BW-SLOW-NEXT: movl $554189328, %ecx # imm = 0x21084210 ; AVX512BW-SLOW-NEXT: kmovd %ecx, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1} ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX512BW-SLOW-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512BW-SLOW-NEXT: shrq $48, %rax ; AVX512BW-SLOW-NEXT: vpbroadcastw %eax, 
%xmm1 ; AVX512BW-SLOW-NEXT: movw $132, %ax ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1} ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm1 ; AVX512BW-SLOW-NEXT: vmovq %xmm0, 32(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa %ymm1, (%r9) ; AVX512BW-SLOW-NEXT: vzeroupper ; AVX512BW-SLOW-NEXT: retq ; ; AVX512BW-FAST-LABEL: store_i8_stride5_vf8: ; AVX512BW-FAST: # %bb.0: ; AVX512BW-FAST-NEXT: movq (%r8), %rax ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512BW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero ; AVX512BW-FAST-NEXT: vmovq {{.*#+}} xmm2 = mem[0],zero ; AVX512BW-FAST-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] ; AVX512BW-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm2 ; AVX512BW-FAST-NEXT: vmovq %rax, %xmm3 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,zero,ymm2[3],zero,ymm2[19,27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,3,0,1] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,ymm2[0,8],zero,zero,zero,ymm2[1,9],zero,zero,zero,ymm2[2,10],zero,zero,ymm2[27],zero,zero,zero,ymm2[20,28],zero,zero,zero,ymm2[21,29],zero,zero,zero,ymm2[22,30] ; AVX512BW-FAST-NEXT: vpor %ymm4, %ymm2, %ymm2 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1] ; AVX512BW-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3 ; AVX512BW-FAST-NEXT: movl $554189328, %ecx # imm = 0x21084210 ; AVX512BW-FAST-NEXT: kmovd %ecx, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %ymm3, %ymm2 {%k1} ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,zero,xmm0[u,7,15],zero,zero,xmm0[u,u,u,u,u,u,u,u,u] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6,14,u],zero,zero,xmm1[7,15,u,u,u,u,u,u,u,u,u] ; AVX512BW-FAST-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512BW-FAST-NEXT: shrq $48, %rax ; AVX512BW-FAST-NEXT: vpbroadcastw %eax, %xmm1 ; AVX512BW-FAST-NEXT: movw $132, %ax ; AVX512BW-FAST-NEXT: kmovd %eax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %xmm1, %xmm0 {%k1} ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm0, %zmm2, %zmm1 ; AVX512BW-FAST-NEXT: vmovq %xmm0, 32(%r9) ; AVX512BW-FAST-NEXT: vmovdqa %ymm1, (%r9) ; AVX512BW-FAST-NEXT: vzeroupper ; AVX512BW-FAST-NEXT: retq %in.vec0 = load <8 x i8>, ptr %in.vecptr0, align 64 %in.vec1 = load <8 x i8>, ptr %in.vecptr1, align 64 %in.vec2 = load <8 x i8>, ptr %in.vecptr2, align 64 %in.vec3 = load <8 x i8>, ptr %in.vecptr3, align 64 %in.vec4 = load <8 x i8>, ptr %in.vecptr4, align 64 %1 = shufflevector <8 x i8> %in.vec0, <8 x i8> %in.vec1, <16 x i32> %2 = shufflevector <8 x i8> %in.vec2, <8 x i8> %in.vec3, <16 x i32> %3 = shufflevector <16 x i8> %1, <16 x i8> %2, <32 x i32> %4 = shufflevector <8 x i8> %in.vec4, <8 x i8> poison, <32 x i32> %5 = shufflevector <32 x i8> %3, <32 x i8> %4, <40 x i32> %interleaved.vec = shufflevector <40 x i8> %5, <40 x i8> poison, <40 x i32> store <40 x i8> %interleaved.vec, ptr %out.vec, align 64 ret void } define void @store_i8_stride5_vf16(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind { ; SSE-LABEL: store_i8_stride5_vf16: ; SSE: # %bb.0: ; SSE-NEXT: movdqa (%rdi), %xmm10 ; SSE-NEXT: movdqa (%rsi), %xmm8 ; SSE-NEXT: movdqa (%rdx), %xmm2 ; SSE-NEXT: movdqa (%rcx), %xmm4 ; SSE-NEXT: movdqa (%r8), %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm2[3,3,3,3,4,5,6,7] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: 
pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; SSE-NEXT: pand %xmm6, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,1,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm6, %xmm5 ; SSE-NEXT: pandn %xmm3, %xmm5 ; SSE-NEXT: por %xmm1, %xmm5 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: pand %xmm2, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm10[1,1,2,2] ; SSE-NEXT: movdqa %xmm10, %xmm12 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; SSE-NEXT: pand %xmm1, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm8[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm10 = xmm10[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm10[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm10[0,0,1,0] ; SSE-NEXT: movdqa %xmm1, %xmm11 ; SSE-NEXT: pandn %xmm10, %xmm11 ; SSE-NEXT: por %xmm7, %xmm11 ; SSE-NEXT: movdqa %xmm2, %xmm10 ; SSE-NEXT: pandn %xmm11, %xmm10 ; SSE-NEXT: por %xmm5, %xmm10 ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; SSE-NEXT: pand %xmm7, %xmm10 ; SSE-NEXT: movdqa %xmm0, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[1,1,2,2] ; SSE-NEXT: movdqa %xmm7, %xmm0 ; SSE-NEXT: pandn %xmm11, %xmm0 ; SSE-NEXT: por %xmm10, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm12[2,2,3,3] ; SSE-NEXT: movdqa {{.*#+}} xmm12 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; SSE-NEXT: pand %xmm12, %xmm11 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm8[8],xmm0[9],xmm8[9],xmm0[10],xmm8[10],xmm0[11],xmm8[11],xmm0[12],xmm8[12],xmm0[13],xmm8[13],xmm0[14],xmm8[14],xmm0[15],xmm8[15] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm0[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm13 = xmm13[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm13[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa %xmm12, %xmm14 ; SSE-NEXT: pandn %xmm13, %xmm14 ; SSE-NEXT: por %xmm11, %xmm14 ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; SSE-NEXT: movdqa %xmm13, %xmm11 ; SSE-NEXT: pandn %xmm14, %xmm11 ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm9[0,1,2,3,5,6,6,7] ; SSE-NEXT: movdqa %xmm9, %xmm10 ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[2,2,2,2] ; SSE-NEXT: movdqa %xmm1, %xmm15 ; SSE-NEXT: pandn %xmm14, %xmm15 ; SSE-NEXT: movdqa %xmm4, %xmm14 ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8],xmm4[8],xmm14[9],xmm4[9],xmm14[10],xmm4[10],xmm14[11],xmm4[11],xmm14[12],xmm4[12],xmm14[13],xmm4[13],xmm14[14],xmm4[14],xmm14[15],xmm4[15] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: por %xmm15, %xmm0 ; SSE-NEXT: pand %xmm13, %xmm0 ; SSE-NEXT: por %xmm11, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm5[2,2,2,2] ; SSE-NEXT: movdqa %xmm6, %xmm11 ; SSE-NEXT: pandn %xmm15, %xmm11 ; SSE-NEXT: pand %xmm6, %xmm0 ; SSE-NEXT: por %xmm0, %xmm11 ; SSE-NEXT: 
punpcklbw {{.*#+}} xmm4 = xmm4[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm7, %xmm15 ; SSE-NEXT: pandn %xmm0, %xmm15 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE-NEXT: pand %xmm7, %xmm0 ; SSE-NEXT: por %xmm0, %xmm15 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[1,1,1,1] ; SSE-NEXT: movdqa %xmm1, %xmm3 ; SSE-NEXT: pandn %xmm0, %xmm3 ; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm8[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: por %xmm3, %xmm0 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; SSE-NEXT: pand %xmm3, %xmm0 ; SSE-NEXT: pandn %xmm15, %xmm3 ; SSE-NEXT: por %xmm0, %xmm3 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,1,1] ; SSE-NEXT: movdqa %xmm12, %xmm15 ; SSE-NEXT: pandn %xmm0, %xmm15 ; SSE-NEXT: pand %xmm12, %xmm3 ; SSE-NEXT: por %xmm3, %xmm15 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE-NEXT: pand %xmm12, %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm4[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,1] ; SSE-NEXT: pandn %xmm3, %xmm12 ; SSE-NEXT: por %xmm0, %xmm12 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,0,0,0] ; SSE-NEXT: pand %xmm6, %xmm0 ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,1,3] ; SSE-NEXT: pandn %xmm3, %xmm6 ; SSE-NEXT: por %xmm0, %xmm6 ; SSE-NEXT: pand %xmm13, %xmm6 ; SSE-NEXT: pandn %xmm12, %xmm13 ; SSE-NEXT: por %xmm6, %xmm13 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm5[0,0,0,0] ; SSE-NEXT: movdqa %xmm1, %xmm6 ; SSE-NEXT: pandn %xmm0, %xmm6 ; SSE-NEXT: pand %xmm1, %xmm13 ; SSE-NEXT: por %xmm13, %xmm6 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm10[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2] ; SSE-NEXT: pand %xmm1, %xmm0 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm14[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,3,2] ; SSE-NEXT: pandn %xmm3, %xmm1 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[3,3,3,3] ; SSE-NEXT: pand %xmm7, %xmm0 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload ; SSE-NEXT: # xmm3 = mem[0,1,2,3,7,5,6,6] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,2] ; SSE-NEXT: pandn %xmm3, %xmm7 ; SSE-NEXT: por %xmm0, %xmm7 ; SSE-NEXT: pand %xmm2, %xmm7 ; SSE-NEXT: pandn %xmm1, %xmm2 ; SSE-NEXT: por %xmm7, %xmm2 ; SSE-NEXT: movdqa {{.*#+}} xmm0 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; SSE-NEXT: pand %xmm0, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3] ; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: por %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm0, 64(%r9) ; SSE-NEXT: movdqa %xmm6, (%r9) ; SSE-NEXT: movdqa %xmm15, 16(%r9) ; SSE-NEXT: movdqa %xmm11, 48(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 32(%r9) ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: store_i8_stride5_vf16: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm2 ; AVX1-ONLY-NEXT: vmovdqa (%rsi), %xmm3 ; AVX1-ONLY-NEXT: vmovdqa (%rdx), %xmm1 ; AVX1-ONLY-NEXT: vmovdqa (%rcx), %xmm4 ; 
AVX1-ONLY-NEXT: vmovdqa (%r8), %xmm0 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm4[6,u,u,u],zero,xmm4[7,u,u,u],zero,xmm4[8,u,u,u],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm1[6],zero,xmm1[u,u,u,7],zero,xmm1[u,u,u,8],zero,xmm1[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[u,u,u],zero,xmm3[7,u,u,u],zero,xmm3[8,u,u,u],zero,xmm3[9,u] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm2[u,u,u,7],zero,xmm2[u,u,u,8],zero,xmm2[u,u,u,9],zero,xmm2[u] ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm7 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255] ; AVX1-ONLY-NEXT: vpblendvb %xmm7, %xmm5, %xmm6, %xmm5 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1],zero,xmm5[3,4,5,6],zero,xmm5[8,9,10,11],zero,xmm5[13,14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,xmm0[6],zero,zero,zero,zero,xmm0[7],zero,zero,zero,zero,xmm0[8],zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm5, %xmm5 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm4[8],xmm1[9],xmm4[9],xmm1[10],xmm4[10],xmm1[11],xmm4[11],xmm1[12],xmm4[12],xmm1[13],xmm4[13],xmm1[14],xmm4[14],xmm1[15],xmm4[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm6[10,11],zero,zero,zero,xmm6[12,13],zero,zero,zero,xmm6[14,15],zero ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm7 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm7[10,11],zero,zero,zero,xmm7[12,13],zero,zero,zero,xmm7[14,15],zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm8, %xmm6 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,xmm8[0,1],zero,zero,zero,xmm8[2,3],zero,zero,zero,xmm8[4,5],zero,zero ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[0,1],zero,zero,zero,xmm10[2,3],zero,zero,zero,xmm10[4,5],zero,zero,zero,xmm10[6] ; AVX1-ONLY-NEXT: vpor %xmm9, %xmm10, %xmm9 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm10 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,xmm0[2],zero ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm9, %xmm9 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm8[6,7],zero,zero,zero,xmm8[8,9],zero,zero,zero,xmm8[10,11],zero,zero,zero ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[6],zero,zero,zero,xmm2[9,8],zero,zero,zero,xmm2[11,10],zero,zero,zero,xmm2[13,12] ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm8, %xmm2 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm0[3],zero,zero,zero,zero,xmm0[4],zero,zero,zero,zero,xmm0[5],zero,zero ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,xmm7[4,5],zero,zero,zero,xmm7[6,7],zero,zero,zero,xmm7[8,9],zero,zero ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm4[8],xmm1[8],xmm4[9],xmm1[9],xmm4[10],xmm1[10],xmm4[11],xmm1[11],xmm4[12],xmm1[12],xmm4[13],xmm1[13],xmm4[14],xmm1[14],xmm4[15],xmm1[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[2],zero,zero,zero,xmm1[5,4],zero,zero,zero,xmm1[7,6],zero,zero,zero,xmm1[9,8] ; 
AVX1-ONLY-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,xmm0[9],zero,zero,zero,zero,xmm0[10],zero,zero,zero,zero,xmm0[11],zero,zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, 48(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 16(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm9, (%r9) ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12],zero,zero,zero,zero,xmm0[13],zero,zero,zero,zero,xmm0[14],zero,zero,zero,zero,xmm0[15] ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm6, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm5, 32(%r9) ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: store_i8_stride5_vf16: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm0 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm1 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm3 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm4 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm2 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5 ; AVX2-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[6],zero,zero,zero,zero,ymm6[7],zero,zero,zero,zero,ymm6[8],zero,zero,zero,zero,ymm6[9,25],zero,zero,zero,zero,ymm6[26],zero,zero,zero,zero,ymm6[27],zero,zero,zero,zero,ymm6[28] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[6],zero,zero,zero,zero,ymm8[7],zero,zero,zero,zero,ymm8[8],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[26],zero,zero,zero,zero,ymm8[27],zero,zero,zero,zero,ymm8[28],zero ; AVX2-SLOW-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[7],zero,zero,zero,zero,ymm5[8],zero,zero,zero,zero,ymm5[9],zero,zero,zero,zero,zero,ymm5[26],zero,zero,zero,zero,ymm5[27],zero,zero,zero,zero,ymm5[28],zero,zero ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm5[2,3,0,1] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[7],zero,zero,zero,zero,ymm9[8],zero,zero,zero,zero,ymm9[9],zero,zero,zero,ymm9[26],zero,zero,zero,zero,ymm9[27],zero,zero,zero,zero,ymm9[28],zero,zero,zero ; AVX2-SLOW-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[1,1,2,2] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm6[0,8],zero,zero,zero,ymm6[1,9],zero,zero,zero,ymm6[2,10],zero,zero,zero,ymm6[19,27],zero,zero,zero,ymm6[20,28],zero,zero,zero,ymm6[21,29],zero,zero,zero ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,0] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,zero,ymm5[1,9],zero,zero,zero,ymm5[2,10],zero,zero,zero,ymm5[3,19],zero,zero,zero,ymm5[28,20],zero,zero,zero,ymm5[29,21],zero,zero,zero,ymm5[30,22] ; AVX2-SLOW-NEXT: vpor %ymm6, %ymm5, %ymm5 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm5, %ymm6, %ymm5 ; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm3 = 
xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[10,11],zero,zero,zero,xmm3[12,13],zero,zero,zero,xmm3[14,15],zero ; AVX2-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero ; AVX2-SLOW-NEXT: vpor %xmm3, %xmm0, %xmm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[12],zero,zero,zero,zero,xmm2[13],zero,zero,zero,zero,xmm2[14],zero,zero,zero,zero,xmm2[15] ; AVX2-SLOW-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-SLOW-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm5, (%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm7, 32(%r9) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: store_i8_stride5_vf16: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm0 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm1 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm2 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm3 ; AVX2-FAST-NEXT: vmovdqa (%r8), %xmm4 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5 ; AVX2-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,2,0,2] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8],zero,zero,zero,ymm7[1,9],zero,zero,zero,ymm7[2,10],zero,zero,zero,ymm7[19,27],zero,zero,zero,ymm7[20,28],zero,zero,zero,ymm7[21,29],zero,zero,zero ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm5[0,2,2,0] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[0,8],zero,zero,zero,ymm8[1,9],zero,zero,zero,ymm8[2,10],zero,zero,zero,ymm8[3,19],zero,zero,zero,ymm8[28,20],zero,zero,zero,ymm8[29,21],zero,zero,zero,ymm8[30,22] ; AVX2-FAST-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [0,0,0,0,0,0,1,1] ; AVX2-FAST-NEXT: vpermd %ymm4, %ymm8, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [1,5,2,6,2,6,3,7] ; AVX2-FAST-NEXT: vpermd %ymm5, %ymm8, %ymm5 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,zero,ymm5[3,7],zero,zero,zero,ymm5[8,12],zero,zero,zero,ymm5[9,13],zero,zero,zero,ymm5[18,22],zero,zero,zero,ymm5[19,23],zero,zero,zero,ymm5[24,28],zero,zero ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [1,5,2,6,6,2,3,7] ; AVX2-FAST-NEXT: vpermd %ymm6, %ymm8, %ymm6 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[2,6],zero,zero,zero,ymm6[3,7],zero,zero,zero,ymm6[8,12],zero,zero,zero,ymm6[9,17],zero,zero,zero,ymm6[22,18],zero,zero,zero,ymm6[23,19],zero,zero,zero,ymm6[24,28] ; AVX2-FAST-NEXT: vpor %ymm5, %ymm6, %ymm5 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [1,1,2,2,2,2,2,2] ; AVX2-FAST-NEXT: vpermd %ymm4, %ymm6, %ymm6 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm5, %ymm6, %ymm5 ; AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[10,11],zero,zero,zero,xmm2[12,13],zero,zero,zero,xmm2[14,15],zero ; 
AVX2-FAST-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero ; AVX2-FAST-NEXT: vpor %xmm2, %xmm0, %xmm0 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15] ; AVX2-FAST-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-FAST-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm5, 32(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm7, (%r9) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: store_i8_stride5_vf16: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm2 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[6],zero,zero,zero,zero,ymm6[7],zero,zero,zero,zero,ymm6[8],zero,zero,zero,zero,ymm6[9,25],zero,zero,zero,zero,ymm6[26],zero,zero,zero,zero,ymm6[27],zero,zero,zero,zero,ymm6[28] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm8 = zero,ymm8[6],zero,zero,zero,zero,ymm8[7],zero,zero,zero,zero,ymm8[8],zero,zero,zero,zero,zero,zero,zero,zero,ymm8[26],zero,zero,zero,zero,ymm8[27],zero,zero,zero,zero,ymm8[28],zero ; AVX2-FAST-PERLANE-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,ymm5[7],zero,zero,zero,zero,ymm5[8],zero,zero,zero,zero,ymm5[9],zero,zero,zero,zero,zero,ymm5[26],zero,zero,zero,zero,ymm5[27],zero,zero,zero,zero,ymm5[28],zero,zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm5[2,3,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm9 = zero,zero,zero,zero,ymm9[7],zero,zero,zero,zero,ymm9[8],zero,zero,zero,zero,ymm9[9],zero,zero,zero,ymm9[26],zero,zero,zero,zero,ymm9[27],zero,zero,zero,zero,ymm9[28],zero,zero,zero ; AVX2-FAST-PERLANE-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm8 = xmm2[1,1,2,2] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm6 = zero,zero,ymm6[0,8],zero,zero,zero,ymm6[1,9],zero,zero,zero,ymm6[2,10],zero,zero,zero,ymm6[19,27],zero,zero,zero,ymm6[20,28],zero,zero,zero,ymm6[21,29],zero,zero,zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,0] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,zero,ymm5[1,9],zero,zero,zero,ymm5[2,10],zero,zero,zero,ymm5[3,19],zero,zero,zero,ymm5[28,20],zero,zero,zero,ymm5[29,21],zero,zero,zero,ymm5[30,22] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm6, %ymm5, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm6 = xmm2[0,0,1,1] ; 
AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,0,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm8, %ymm5, %ymm6, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm3 = zero,zero,zero,xmm3[10,11],zero,zero,zero,xmm3[12,13],zero,zero,zero,xmm3[14,15],zero ; AVX2-FAST-PERLANE-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero ; AVX2-FAST-PERLANE-NEXT: vpor %xmm3, %xmm0, %xmm0 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm1 = xmm2[12],zero,zero,zero,zero,xmm2[13],zero,zero,zero,zero,xmm2[14],zero,zero,zero,zero,xmm2[15] ; AVX2-FAST-PERLANE-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, (%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 32(%r9) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512F-SLOW-LABEL: store_i8_stride5_vf16: ; AVX512F-SLOW: # %bb.0: ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512F-SLOW-NEXT: vmovdqa (%rsi), %xmm2 ; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %xmm3 ; AVX512F-SLOW-NEXT: vmovdqa (%rcx), %xmm4 ; AVX512F-SLOW-NEXT: vmovdqa (%r8), %xmm1 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm5 ; AVX512F-SLOW-NEXT: vinserti128 $1, %xmm2, %ymm0, %ymm6 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm6[u,u,u,7],zero,ymm6[u,u,u,8],zero,ymm6[u,u,u,9],zero,ymm6[u,u,u],zero,ymm6[26,u,u,u],zero,ymm6[27,u,u,u],zero,ymm6[28,u,u] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm8[u,u,u],zero,ymm8[7,u,u,u],zero,ymm8[8,u,u,u],zero,ymm8[9,u,u,u,26],zero,ymm8[u,u,u,27],zero,ymm8[u,u,u,28],zero,ymm8[u,u] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX512F-SLOW-NEXT: vpternlogq $50, %ymm7, %ymm9, %ymm8 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm5[0,2,0,2] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8,u],zero,zero,ymm7[1,9,u],zero,zero,ymm7[2,10,u],zero,zero,ymm7[19,27,u],zero,zero,ymm7[20,28,u],zero,zero,ymm7[21,29,u],zero,zero ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[6],zero,ymm5[u,u,u,7],zero,ymm5[u,u,u,8],zero,ymm5[u,u,u,9,25,u,u,u],zero,ymm5[26,u,u,u],zero,ymm5[27,u,u,u],zero,ymm5[28] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,3,0,1] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = zero,ymm5[6,u,u,u],zero,ymm5[7,u,u,u],zero,ymm5[8,u,u,u],zero,zero,ymm5[u,u,u,26],zero,ymm5[u,u,u,27],zero,ymm5[u,u,u,28],zero ; AVX512F-SLOW-NEXT: vpternlogq $200, %ymm8, %ymm9, %ymm5 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,ymm6[u,1,9],zero,zero,ymm6[u,2,10],zero,zero,ymm6[u,3,19],zero,zero,ymm6[u,28,20],zero,zero,ymm6[u,29,21],zero,zero,ymm6[u,30,22] ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5 ; AVX512F-SLOW-NEXT: vporq %zmm7, %zmm5, %zmm5 ; 
AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [u,0,0,0,0,u,1,1,1,1,u,2,2,2,2,u] ; AVX512F-SLOW-NEXT: vpermd %zmm1, %zmm6, %zmm6 ; AVX512F-SLOW-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6 ; AVX512F-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u],zero,zero,xmm3[10,11,u],zero,zero,xmm3[12,13,u],zero,zero,xmm3[14,15,u] ; AVX512F-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm2[8],xmm0[9],xmm2[9],xmm0[10],xmm2[10],xmm0[11],xmm2[11],xmm0[12],xmm2[12],xmm0[13],xmm2[13],xmm0[14],xmm2[14],xmm0[15],xmm2[15] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,10,11],zero,zero,xmm0[u,12,13],zero,zero,xmm0[u,14,15],zero,zero,xmm0[u] ; AVX512F-SLOW-NEXT: vpor %xmm3, %xmm0, %xmm0 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[1,2,3,4],zero,xmm0[6,7,8,9],zero,xmm0[11,12,13,14],zero ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[12],zero,zero,zero,zero,xmm1[13],zero,zero,zero,zero,xmm1[14],zero,zero,zero,zero,xmm1[15] ; AVX512F-SLOW-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX512F-SLOW-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm6, (%r9) ; AVX512F-SLOW-NEXT: vzeroupper ; AVX512F-SLOW-NEXT: retq ; ; AVX512F-FAST-LABEL: store_i8_stride5_vf16: ; AVX512F-FAST: # %bb.0: ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm1 ; AVX512F-FAST-NEXT: vmovdqa (%rsi), %xmm2 ; AVX512F-FAST-NEXT: vmovdqa (%rdx), %xmm3 ; AVX512F-FAST-NEXT: vmovdqa (%rcx), %xmm4 ; AVX512F-FAST-NEXT: vmovdqa (%r8), %xmm0 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm2, %ymm1, %ymm5 ; AVX512F-FAST-NEXT: vinserti128 $1, %xmm4, %ymm3, %ymm6 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm6[0,2,0,2] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm7[0,8,u],zero,zero,ymm7[1,9,u],zero,zero,ymm7[2,10,u],zero,zero,ymm7[19,27,u],zero,zero,ymm7[20,28,u],zero,zero,ymm7[21,29,u],zero,zero ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [1,5,2,6,2,6,3,7] ; AVX512F-FAST-NEXT: vpermd %ymm5, %ymm8, %ymm8 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,ymm8[u,3,7],zero,zero,ymm8[u,8,12],zero,zero,ymm8[u,9,13],zero,zero,ymm8[u,18,22],zero,zero,ymm8[u,19,23],zero,zero,ymm8[u,24,28],zero,zero ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm8, %zmm7, %zmm7 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,2,0] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[0,8],zero,zero,ymm5[u,1,9],zero,zero,ymm5[u,2,10],zero,zero,ymm5[u,3,19],zero,zero,ymm5[u,28,20],zero,zero,ymm5[u,29,21],zero,zero,ymm5[u,30,22] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [1,5,2,6,6,2,3,7] ; AVX512F-FAST-NEXT: vpermd %ymm6, %ymm8, %ymm6 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[2,6,u],zero,zero,ymm6[3,7,u],zero,zero,ymm6[8,12,u],zero,zero,ymm6[9,17,u],zero,zero,ymm6[22,18,u],zero,zero,ymm6[23,19,u],zero,zero,ymm6[24,28] ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm6, %zmm5, %zmm5 ; AVX512F-FAST-NEXT: vporq %zmm7, %zmm5, %zmm5 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm6 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u] ; AVX512F-FAST-NEXT: vpermd %zmm6, %zmm7, %zmm6 ; AVX512F-FAST-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6 ; AVX512F-FAST-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm3[8],xmm4[8],xmm3[9],xmm4[9],xmm3[10],xmm4[10],xmm3[11],xmm4[11],xmm3[12],xmm4[12],xmm3[13],xmm4[13],xmm3[14],xmm4[14],xmm3[15],xmm4[15] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm3 = 
xmm3[u],zero,zero,xmm3[10,11,u],zero,zero,xmm3[12,13,u],zero,zero,xmm3[14,15,u] ; AVX512F-FAST-NEXT: vpunpckhbw {{.*#+}} xmm1 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[u,10,11],zero,zero,xmm1[u,12,13],zero,zero,xmm1[u,14,15],zero,zero,xmm1[u] ; AVX512F-FAST-NEXT: vpor %xmm3, %xmm1, %xmm1 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3,4],zero,xmm1[6,7,8,9],zero,xmm1[11,12,13,14],zero ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[12],zero,zero,zero,zero,xmm0[13],zero,zero,zero,zero,xmm0[14],zero,zero,zero,zero,xmm0[15] ; AVX512F-FAST-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512F-FAST-NEXT: vmovdqa %xmm0, 64(%r9) ; AVX512F-FAST-NEXT: vmovdqa64 %zmm6, (%r9) ; AVX512F-FAST-NEXT: vzeroupper ; AVX512F-FAST-NEXT: retq ; ; AVX512BW-SLOW-LABEL: store_i8_stride5_vf16: ; AVX512BW-SLOW: # %bb.0: ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512BW-SLOW-NEXT: vmovdqa (%rsi), %xmm1 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdx), %xmm2 ; AVX512BW-SLOW-NEXT: vmovdqa (%rcx), %xmm3 ; AVX512BW-SLOW-NEXT: vmovdqa (%r8), %xmm4 ; AVX512BW-SLOW-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm5 ; AVX512BW-SLOW-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm6 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,zero,ymm6[7],zero,zero,zero,zero,ymm6[8],zero,zero,zero,zero,ymm6[9],zero,zero,zero,zero,zero,ymm6[26],zero,zero,zero,zero,ymm6[27],zero,zero,zero,zero,ymm6[28],zero,zero ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm6[2,3,0,1] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm8[7],zero,zero,zero,zero,ymm8[8],zero,zero,zero,zero,ymm8[9],zero,zero,zero,ymm8[26],zero,zero,zero,zero,ymm8[27],zero,zero,zero,zero,ymm8[28],zero,zero,zero ; AVX512BW-SLOW-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm5[6],zero,zero,zero,zero,ymm5[7],zero,zero,zero,zero,ymm5[8],zero,zero,zero,zero,ymm5[9,25],zero,zero,zero,zero,ymm5[26],zero,zero,zero,zero,ymm5[27],zero,zero,zero,zero,ymm5[28] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm5[2,3,0,1] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = zero,ymm9[6],zero,zero,zero,zero,ymm9[7],zero,zero,zero,zero,ymm9[8],zero,zero,zero,zero,zero,zero,zero,zero,ymm9[26],zero,zero,zero,zero,ymm9[27],zero,zero,zero,zero,ymm9[28],zero ; AVX512BW-SLOW-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX512BW-SLOW-NEXT: movl $831283992, %eax # imm = 0x318C6318 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm7, %ymm8 {%k1} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,2,0] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[0,8],zero,zero,zero,ymm6[1,9],zero,zero,zero,ymm6[2,10],zero,zero,zero,ymm6[3,19],zero,zero,zero,ymm6[28,20],zero,zero,zero,ymm6[29,21],zero,zero,zero,ymm6[30,22] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,2,0,2] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = zero,zero,ymm5[0,8],zero,zero,zero,ymm5[1,9],zero,zero,zero,ymm5[2,10],zero,zero,zero,ymm5[19,27],zero,zero,zero,ymm5[20,28],zero,zero,zero,ymm5[21,29],zero,zero,zero ; AVX512BW-SLOW-NEXT: vpor %ymm6, %ymm5, %ymm5 ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm8, %zmm5, %zmm5 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2] ; AVX512BW-SLOW-NEXT: vpermd %zmm4, %zmm6, %zmm6 ; AVX512BW-SLOW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm6, %zmm5 {%k1} ; AVX512BW-SLOW-NEXT: vpunpckhbw 
{{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[10,11],zero,zero,zero,xmm2[12,13],zero,zero,zero,xmm2[14,15],zero ; AVX512BW-SLOW-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15] ; AVX512BW-SLOW-NEXT: vpternlogq $254, %xmm2, %xmm0, %xmm1 ; AVX512BW-SLOW-NEXT: vmovdqa %xmm1, 64(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm5, (%r9) ; AVX512BW-SLOW-NEXT: vzeroupper ; AVX512BW-SLOW-NEXT: retq ; ; AVX512BW-FAST-LABEL: store_i8_stride5_vf16: ; AVX512BW-FAST: # %bb.0: ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512BW-FAST-NEXT: vmovdqa (%rsi), %xmm1 ; AVX512BW-FAST-NEXT: vmovdqa (%rdx), %xmm2 ; AVX512BW-FAST-NEXT: vmovdqa (%rcx), %xmm3 ; AVX512BW-FAST-NEXT: vmovdqa (%r8), %xmm4 ; AVX512BW-FAST-NEXT: vinserti128 $1, %xmm1, %ymm0, %ymm5 ; AVX512BW-FAST-NEXT: vinserti128 $1, %xmm3, %ymm2, %ymm6 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [1,5,2,6,6,2,3,7] ; AVX512BW-FAST-NEXT: vpermd %ymm6, %ymm7, %ymm7 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm5[0,2,2,0] ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} zmm7 = zmm7[0,8],zero,zero,zero,zmm7[1,9],zero,zero,zero,zmm7[2,10],zero,zero,zero,zmm7[3,19],zero,zero,zero,zmm7[28,20],zero,zero,zero,zmm7[29,21],zero,zero,zero,zmm7[30,22,34,38],zero,zero,zero,zmm7[35,39],zero,zero,zero,zmm7[40,44],zero,zero,zero,zmm7[41,49],zero,zero,zero,zmm7[54,50],zero,zero,zero,zmm7[55,51],zero,zero,zero,zmm7[56,60] ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [1,5,2,6,2,6,3,7] ; AVX512BW-FAST-NEXT: vpermd %ymm5, %ymm8, %ymm5 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,2,0,2] ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm5, %zmm6, %zmm5 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} zmm5 = zero,zero,zmm5[0,8],zero,zero,zero,zmm5[1,9],zero,zero,zero,zmm5[2,10],zero,zero,zero,zmm5[19,27],zero,zero,zero,zmm5[20,28],zero,zero,zero,zmm5[21,29],zero,zero,zero,zero,zero,zero,zmm5[35,39],zero,zero,zero,zmm5[40,44],zero,zero,zero,zmm5[41,45],zero,zero,zero,zmm5[50,54],zero,zero,zero,zmm5[51,55],zero,zero,zero,zmm5[56,60],zero,zero ; AVX512BW-FAST-NEXT: vporq %zmm7, %zmm5, %zmm5 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm4, %zmm4, %zmm6 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10] ; AVX512BW-FAST-NEXT: vpermd %zmm6, %zmm7, %zmm6 ; AVX512BW-FAST-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210 ; AVX512BW-FAST-NEXT: kmovq %rax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm6, %zmm5 {%k1} ; AVX512BW-FAST-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm2[8],xmm3[8],xmm2[9],xmm3[9],xmm2[10],xmm3[10],xmm2[11],xmm3[11],xmm2[12],xmm3[12],xmm2[13],xmm3[13],xmm2[14],xmm3[14],xmm2[15],xmm3[15] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm2[10,11],zero,zero,zero,xmm2[12,13],zero,zero,zero,xmm2[14,15],zero ; AVX512BW-FAST-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX512BW-FAST-NEXT: vpshufb 
{{.*#+}} xmm0 = zero,xmm0[10,11],zero,zero,zero,xmm0[12,13],zero,zero,zero,xmm0[14,15],zero,zero,zero ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm1 = xmm4[12],zero,zero,zero,zero,xmm4[13],zero,zero,zero,zero,xmm4[14],zero,zero,zero,zero,xmm4[15] ; AVX512BW-FAST-NEXT: vpternlogq $254, %xmm2, %xmm0, %xmm1 ; AVX512BW-FAST-NEXT: vmovdqa %xmm1, 64(%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm5, (%r9) ; AVX512BW-FAST-NEXT: vzeroupper ; AVX512BW-FAST-NEXT: retq %in.vec0 = load <16 x i8>, ptr %in.vecptr0, align 64 %in.vec1 = load <16 x i8>, ptr %in.vecptr1, align 64 %in.vec2 = load <16 x i8>, ptr %in.vecptr2, align 64 %in.vec3 = load <16 x i8>, ptr %in.vecptr3, align 64 %in.vec4 = load <16 x i8>, ptr %in.vecptr4, align 64 %1 = shufflevector <16 x i8> %in.vec0, <16 x i8> %in.vec1, <32 x i32> %2 = shufflevector <16 x i8> %in.vec2, <16 x i8> %in.vec3, <32 x i32> %3 = shufflevector <32 x i8> %1, <32 x i8> %2, <64 x i32> %4 = shufflevector <16 x i8> %in.vec4, <16 x i8> poison, <64 x i32> %5 = shufflevector <64 x i8> %3, <64 x i8> %4, <80 x i32> %interleaved.vec = shufflevector <80 x i8> %5, <80 x i8> poison, <80 x i32> store <80 x i8> %interleaved.vec, ptr %out.vec, align 64 ret void } define void @store_i8_stride5_vf32(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind { ; SSE-LABEL: store_i8_stride5_vf32: ; SSE: # %bb.0: ; SSE-NEXT: subq $152, %rsp ; SSE-NEXT: movdqa 16(%rdi), %xmm3 ; SSE-NEXT: movdqa (%rsi), %xmm9 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 16(%rsi), %xmm7 ; SSE-NEXT: movdqa (%rdx), %xmm2 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 16(%rdx), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa (%rcx), %xmm11 ; SSE-NEXT: movdqa 16(%rcx), %xmm12 ; SSE-NEXT: movdqa 16(%r8), %xmm14 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa {{.*#+}} xmm13 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; SSE-NEXT: pand %xmm13, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm12[2,1,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm13, %xmm4 ; SSE-NEXT: pandn %xmm1, %xmm4 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: pand %xmm8, %xmm4 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm3[1,1,2,2] ; SSE-NEXT: movdqa %xmm3, %xmm15 ; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; SSE-NEXT: pand %xmm3, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,0] ; SSE-NEXT: movdqa %xmm3, %xmm5 ; SSE-NEXT: pandn %xmm1, %xmm5 ; SSE-NEXT: por %xmm0, %xmm5 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: por %xmm4, %xmm0 ; SSE-NEXT: movdqa {{.*#+}} xmm10 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; SSE-NEXT: pand %xmm10, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm14[1,1,2,2] ; SSE-NEXT: movdqa %xmm10, %xmm4 ; SSE-NEXT: pandn %xmm1, %xmm4 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: 
movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm2[3,3,3,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: pand %xmm13, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm11[2,1,2,3] ; SSE-NEXT: movdqa %xmm11, %xmm2 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm13, %xmm4 ; SSE-NEXT: pandn %xmm1, %xmm4 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm9[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,0] ; SSE-NEXT: movdqa %xmm3, %xmm1 ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: movdqa (%rdi), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] ; SSE-NEXT: pand %xmm3, %xmm0 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: pand %xmm8, %xmm4 ; SSE-NEXT: por %xmm4, %xmm0 ; SSE-NEXT: movdqa (%r8), %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] ; SSE-NEXT: movdqa %xmm10, %xmm4 ; SSE-NEXT: pandn %xmm1, %xmm4 ; SSE-NEXT: pand %xmm10, %xmm0 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa %xmm7, %xmm11 ; SSE-NEXT: movdqa %xmm7, %xmm0 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm7[8],xmm0[9],xmm7[9],xmm0[10],xmm7[10],xmm0[11],xmm7[11],xmm0[12],xmm7[12],xmm0[13],xmm7[13],xmm0[14],xmm7[14],xmm0[15],xmm7[15] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; SSE-NEXT: movdqa %xmm9, %xmm4 ; SSE-NEXT: pandn %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm15, %xmm6 ; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm15[2,2,3,3] ; SSE-NEXT: pand %xmm9, %xmm0 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm4, %xmm0 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm7[0,1,2,3,5,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[2,2,2,2] ; SSE-NEXT: movdqa %xmm3, %xmm5 ; SSE-NEXT: pandn %xmm4, %xmm5 ; SSE-NEXT: movdqa %xmm12, %xmm15 ; SSE-NEXT: punpckhbw {{.*#+}} xmm15 = xmm15[8],xmm12[8],xmm15[9],xmm12[9],xmm15[10],xmm12[10],xmm15[11],xmm12[11],xmm15[12],xmm12[12],xmm15[13],xmm12[13],xmm15[14],xmm12[14],xmm15[15],xmm12[15] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm3, %xmm4 ; SSE-NEXT: por %xmm5, %xmm4 ; SSE-NEXT: pand %xmm8, %xmm4 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,2,2,2] ; SSE-NEXT: movdqa %xmm13, %xmm1 ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: pand %xmm13, %xmm4 
; SSE-NEXT: por %xmm4, %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: punpcklbw {{.*#+}} xmm12 = xmm12[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: movdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm10, %xmm4 ; SSE-NEXT: pandn %xmm0, %xmm4 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm7[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE-NEXT: pand %xmm10, %xmm0 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; SSE-NEXT: movdqa %xmm1, %xmm5 ; SSE-NEXT: pandn %xmm4, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,1,1] ; SSE-NEXT: movdqa %xmm3, %xmm7 ; SSE-NEXT: pandn %xmm4, %xmm7 ; SSE-NEXT: movdqa %xmm11, %xmm0 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm11[0],xmm0[1],xmm11[1],xmm0[2],xmm11[2],xmm0[3],xmm11[3],xmm0[4],xmm11[4],xmm0[5],xmm11[5],xmm0[6],xmm11[6],xmm0[7],xmm11[7] ; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm3, %xmm4 ; SSE-NEXT: por %xmm7, %xmm4 ; SSE-NEXT: pand %xmm1, %xmm4 ; SSE-NEXT: por %xmm5, %xmm4 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm14[0,0,1,1] ; SSE-NEXT: movdqa %xmm9, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm9, %xmm4 ; SSE-NEXT: por %xmm4, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload ; SSE-NEXT: movdqa %xmm11, %xmm0 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm11[8],xmm0[9],xmm11[9],xmm0[10],xmm11[10],xmm0[11],xmm11[11],xmm0[12],xmm11[12],xmm0[13],xmm11[13],xmm0[14],xmm11[14],xmm0[15],xmm11[15] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm0[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa %xmm9, %xmm5 ; SSE-NEXT: pandn %xmm4, %xmm5 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[2,2,3,3] ; SSE-NEXT: pand %xmm9, %xmm4 ; SSE-NEXT: por %xmm4, %xmm5 ; SSE-NEXT: movdqa %xmm8, %xmm4 ; SSE-NEXT: pandn %xmm5, %xmm4 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm0[0,1,2,3,5,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,2,2,2] ; SSE-NEXT: movdqa %xmm3, %xmm7 ; SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: movdqa %xmm2, %xmm12 ; SSE-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm2[8],xmm12[9],xmm2[9],xmm12[10],xmm2[10],xmm12[11],xmm2[11],xmm12[12],xmm2[12],xmm12[13],xmm2[13],xmm12[14],xmm2[14],xmm12[15],xmm2[15] ; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm12[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm14 = xmm14[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm14 = xmm14[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm3, %xmm14 ; SSE-NEXT: por %xmm7, %xmm14 ; SSE-NEXT: pand %xmm8, %xmm14 ; SSE-NEXT: por %xmm4, %xmm14 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,2,2,2] ; SSE-NEXT: movdqa %xmm13, %xmm7 ; SSE-NEXT: pandn %xmm4, %xmm7 ; SSE-NEXT: pand %xmm13, %xmm14 ; SSE-NEXT: por %xmm14, %xmm7 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 
16-byte Spill ; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm10, %xmm14 ; SSE-NEXT: pandn %xmm4, %xmm14 ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm0[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,0,0,0] ; SSE-NEXT: pand %xmm10, %xmm4 ; SSE-NEXT: por %xmm4, %xmm14 ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm6[1,1,1,1] ; SSE-NEXT: movdqa %xmm3, %xmm2 ; SSE-NEXT: pandn %xmm4, %xmm2 ; SSE-NEXT: punpcklbw {{.*#+}} xmm11 = xmm11[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm11[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm3, %xmm4 ; SSE-NEXT: por %xmm2, %xmm4 ; SSE-NEXT: pand %xmm1, %xmm4 ; SSE-NEXT: pandn %xmm14, %xmm1 ; SSE-NEXT: por %xmm4, %xmm1 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm5[0,0,1,1] ; SSE-NEXT: movdqa %xmm9, %xmm7 ; SSE-NEXT: pandn %xmm2, %xmm7 ; SSE-NEXT: pand %xmm9, %xmm1 ; SSE-NEXT: por %xmm1, %xmm7 ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,3,2] ; SSE-NEXT: movdqa %xmm3, %xmm2 ; SSE-NEXT: pandn %xmm0, %xmm2 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm5[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,2,2,2] ; SSE-NEXT: pand %xmm3, %xmm0 ; SSE-NEXT: por %xmm0, %xmm2 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: movdqa %xmm6, %xmm0 ; SSE-NEXT: pandn %xmm2, %xmm0 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload ; SSE-NEXT: # xmm2 = mem[0,1,2,3,7,5,6,6] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,2,2] ; SSE-NEXT: movdqa %xmm10, %xmm14 ; SSE-NEXT: pandn %xmm2, %xmm14 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[3,3,3,3] ; SSE-NEXT: pand %xmm10, %xmm2 ; SSE-NEXT: por %xmm2, %xmm14 ; SSE-NEXT: pand %xmm6, %xmm14 ; SSE-NEXT: por %xmm0, %xmm14 ; SSE-NEXT: movdqa {{.*#+}} xmm11 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[3,3,3,3] ; SSE-NEXT: movdqa %xmm11, %xmm15 ; SSE-NEXT: pandn %xmm0, %xmm15 ; SSE-NEXT: pand %xmm11, %xmm14 ; SSE-NEXT: por %xmm14, %xmm15 ; SSE-NEXT: pshuflw $225, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload ; SSE-NEXT: # xmm0 = mem[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,1] ; SSE-NEXT: movdqa %xmm9, %xmm2 ; SSE-NEXT: pandn %xmm0, %xmm2 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm5[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE-NEXT: pand %xmm9, %xmm0 ; SSE-NEXT: por %xmm0, %xmm2 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm2, %xmm0 ; SSE-NEXT: pshuflw $164, (%rsp), %xmm2 # 16-byte Folded Reload ; SSE-NEXT: # xmm2 = mem[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,1,3] ; SSE-NEXT: movdqa %xmm13, %xmm14 ; SSE-NEXT: pandn %xmm2, %xmm14 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm4[0,0,0,0] ; SSE-NEXT: pand %xmm13, %xmm2 ; SSE-NEXT: por %xmm2, %xmm14 ; SSE-NEXT: pand %xmm8, %xmm14 ; SSE-NEXT: por %xmm0, %xmm14 ; SSE-NEXT: pshufd 
{{.*#+}} xmm2 = xmm1[0,0,0,0] ; SSE-NEXT: movdqa %xmm3, %xmm0 ; SSE-NEXT: pandn %xmm2, %xmm0 ; SSE-NEXT: pand %xmm3, %xmm14 ; SSE-NEXT: por %xmm14, %xmm0 ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm12[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,2,3,2] ; SSE-NEXT: movdqa %xmm3, %xmm5 ; SSE-NEXT: pandn %xmm2, %xmm5 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm4[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,2,2,2] ; SSE-NEXT: pand %xmm3, %xmm2 ; SSE-NEXT: por %xmm2, %xmm5 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm14 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[3,3,3,3] ; SSE-NEXT: pand %xmm10, %xmm2 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload ; SSE-NEXT: # xmm12 = mem[0,1,2,3,7,5,6,6] ; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm12[2,3,2,2] ; SSE-NEXT: pandn %xmm12, %xmm10 ; SSE-NEXT: por %xmm2, %xmm10 ; SSE-NEXT: movdqa %xmm6, %xmm1 ; SSE-NEXT: pand %xmm6, %xmm10 ; SSE-NEXT: pandn %xmm5, %xmm1 ; SSE-NEXT: por %xmm10, %xmm1 ; SSE-NEXT: pand %xmm11, %xmm1 ; SSE-NEXT: movdqa %xmm1, %xmm5 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[3,3,3,3] ; SSE-NEXT: pandn %xmm2, %xmm11 ; SSE-NEXT: por %xmm5, %xmm11 ; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm4[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,0,0,0] ; SSE-NEXT: pand %xmm9, %xmm2 ; SSE-NEXT: pshuflw $225, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload ; SSE-NEXT: # xmm5 = mem[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,1] ; SSE-NEXT: pandn %xmm5, %xmm9 ; SSE-NEXT: por %xmm2, %xmm9 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm14[0,0,0,0] ; SSE-NEXT: pand %xmm13, %xmm2 ; SSE-NEXT: pshuflw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload ; SSE-NEXT: # xmm5 = mem[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,1,3] ; SSE-NEXT: pandn %xmm5, %xmm13 ; SSE-NEXT: por %xmm2, %xmm13 ; SSE-NEXT: pand %xmm8, %xmm13 ; SSE-NEXT: pandn %xmm9, %xmm8 ; SSE-NEXT: por %xmm13, %xmm8 ; SSE-NEXT: pand %xmm3, %xmm8 ; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,0,0,0] ; SSE-NEXT: pandn %xmm2, %xmm3 ; SSE-NEXT: por %xmm8, %xmm3 ; SSE-NEXT: movdqa %xmm3, (%r9) ; SSE-NEXT: movdqa %xmm11, 64(%r9) ; SSE-NEXT: movdqa %xmm0, 80(%r9) ; SSE-NEXT: movdqa %xmm15, 144(%r9) ; SSE-NEXT: movdqa %xmm7, 16(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 48(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 96(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 128(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 32(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 112(%r9) ; SSE-NEXT: addq $152, %rsp ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: store_i8_stride5_vf32: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: vmovdqa 16(%rsi), %xmm10 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm11 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm11[8],xmm10[8],xmm11[9],xmm10[9],xmm11[10],xmm10[10],xmm11[11],xmm10[11],xmm11[12],xmm10[12],xmm11[13],xmm10[13],xmm11[14],xmm10[14],xmm11[15],xmm10[15] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [0,10,11,14,15,0,12,13,0,10,11,14,15,0,12,13] ; AVX1-ONLY-NEXT: # xmm5 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm0, %xmm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = 
xmm0[u,u,4,5,u,u,u,6,7,u,u,u,8,9,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm1 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm1, %ymm2 ; AVX1-ONLY-NEXT: vmovaps %ymm1, %ymm7 ; AVX1-ONLY-NEXT: vmovdqa 16(%rcx), %xmm0 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdx), %xmm1 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm4 = [12,13,0,10,11,14,15,0,12,13,0,10,11,14,15,0] ; AVX1-ONLY-NEXT: # xmm4 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[2,u,u,u,5,4,u,u,u,7,6,u,u,u,9,8] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm6, %ymm3 ; AVX1-ONLY-NEXT: vandps %ymm7, %ymm3, %ymm3 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm3, %ymm2 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm3 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,xmm3[1,2,3,4],zero,xmm3[6,7,8,9],zero,xmm3[11,12,13,14],zero ; AVX1-ONLY-NEXT: vmovdqa 16(%r8), %xmm15 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm12 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15] ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm15, %xmm6 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0],zero,xmm2[2,3,4,5],zero,xmm2[7,8,9,10],zero,xmm2[12,13,14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,xmm15[9],zero,zero,zero,zero,xmm15[10],zero,zero,zero,zero,xmm15[11],zero,zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa (%rcx), %xmm7 ; AVX1-ONLY-NEXT: vmovdqa (%rdx), %xmm8 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm2 = xmm8[8],xmm7[8],xmm8[9],xmm7[9],xmm8[10],xmm7[10],xmm8[11],xmm7[11],xmm8[12],xmm7[12],xmm8[13],xmm7[13],xmm8[14],xmm7[14],xmm8[15],xmm7[15] ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm6[u,u,0,1,u,u,u,2,3,u,u,u,4,5,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm14 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm11[0],xmm10[0],xmm11[1],xmm10[1],xmm11[2],xmm10[2],xmm11[3],xmm10[3],xmm11[4],xmm10[4],xmm11[5],xmm10[5],xmm11[6],xmm10[6],xmm11[7],xmm10[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm2[0,1,u,u,u,2,3,u,u,u,4,5,u,u,u,6] ; AVX1-ONLY-NEXT: vmovdqa (%rsi), %xmm2 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm3 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm3[8],xmm2[8],xmm3[9],xmm2[9],xmm3[10],xmm2[10],xmm3[11],xmm2[11],xmm3[12],xmm2[12],xmm3[13],xmm2[13],xmm3[14],xmm2[14],xmm3[15],xmm2[15] ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm4, %xmm5 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm5, %ymm5 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm13 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm14, %ymm13, %ymm14 ; AVX1-ONLY-NEXT: 
vandps %ymm5, %ymm13, %ymm5 ; AVX1-ONLY-NEXT: vorps %ymm5, %ymm14, %ymm5 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm14 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm14[0,1,2,3],zero,xmm14[5,6,7,8],zero,xmm14[10,11,12,13],zero,xmm14[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,xmm15[0],zero,zero,zero,zero,xmm15[1],zero,zero,zero,zero,xmm15[2],zero ; AVX1-ONLY-NEXT: vpor %xmm9, %xmm14, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm5[1,2,3,4],zero,xmm5[6,7,8,9],zero,xmm5[11,12,13,14],zero ; AVX1-ONLY-NEXT: vmovdqa (%r8), %xmm14 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm14, %xmm9 ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm9, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = zero,xmm0[6,u,u,u],zero,xmm0[7,u,u,u],zero,xmm0[8,u,u,u],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[6],zero,xmm1[u,u,u,7],zero,xmm1[u,u,u,8],zero,xmm1[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm9 = [0,6,7,10,11,0,8,9,0,6,7,10,11,0,8,9] ; AVX1-ONLY-NEXT: # xmm9 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm6, %xmm5 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm5, %ymm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm10[u,u,u],zero,xmm10[7,u,u,u],zero,xmm10[8,u,u,u],zero,xmm10[9,u] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [8,128,0,7,128,9,128,0,8,128,0,7,128,9,128,0] ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm11, %xmm12 ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm12, %xmm5 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3],xmm10[4],xmm11[4],xmm10[5],xmm11[5],xmm10[6],xmm11[6],xmm10[7],xmm11[7] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm12 = [6,11,10,0,9,8,13,12,6,11,10,0,9,8,13,12] ; AVX1-ONLY-NEXT: # xmm12 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm10, %xmm10 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm10, %ymm5 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm10 = [255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0] ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm10, %ymm1 ; AVX1-ONLY-NEXT: vandps %ymm5, %ymm10, %ymm5 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm5, %ymm5 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = [0,1,2,128,4,5,6,7,128,9,10,11,12,128,14,15] ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm5, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm4 = [128,128,128,3,128,128,128,128,4,128,128,128,128,5,128,128] ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm15, %xmm11 ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm11, %xmm1 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm5, %xmm5 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm11 = [0,1,128,3,4,5,6,128,8,9,10,11,128,13,14,15] ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm5, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = [128,128,6,128,128,128,128,7,128,128,128,128,8,128,128,128] ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm15, %xmm15 ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm15, %xmm15 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7] ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm0, %xmm9 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,0,1,u,u,u,2,3,u,u,u,4,5,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3],xmm2[4],xmm3[4],xmm2[5],xmm3[5],xmm2[6],xmm3[6],xmm2[7],xmm3[7] ; AVX1-ONLY-NEXT: 
vpshufb %xmm12, %xmm9, %xmm9 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm12 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3],xmm3[4],xmm2[4],xmm3[5],xmm2[5],xmm3[6],xmm2[6],xmm3[7],xmm2[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm12 = xmm12[0,1,u,u,u,2,3,u,u,u,4,5,u,u,u,6] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm9, %ymm12, %ymm9 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm12 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm12, %ymm0 ; AVX1-ONLY-NEXT: vandps %ymm12, %ymm9, %ymm9 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm9, %ymm9 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm9, %xmm0 ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpshufb %xmm4, %xmm14, %xmm4 ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm9[0,1,2,3],zero,xmm9[5,6,7,8],zero,xmm9[10,11,12,13],zero,xmm9[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm9 = zero,zero,zero,zero,xmm14[0],zero,zero,zero,zero,xmm14[1],zero,zero,zero,zero,xmm14[2],zero ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm9, %xmm4 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,u],zero,xmm2[7,u,u,u],zero,xmm2[8,u,u,u],zero,xmm2[9,u] ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,4,5,u,u,u,6,7,u,u,u,8,9,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm2, %ymm2 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = zero,xmm7[6,u,u,u],zero,xmm7[7,u,u,u],zero,xmm7[8,u,u,u],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm8[6],zero,xmm8[u,u,u,7],zero,xmm8[u,u,u,8],zero,xmm8[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm6, %xmm3 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm7[8],xmm8[8],xmm7[9],xmm8[9],xmm7[10],xmm8[10],xmm7[11],xmm8[11],xmm7[12],xmm8[12],xmm7[13],xmm8[13],xmm7[14],xmm8[14],xmm7[15],xmm8[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[2,u,u,u,5,4,u,u,u,7,6,u,u,u,9,8] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm3, %ymm3 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm13, %ymm2 ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm13, %ymm3 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm3, %ymm2 ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm2, %xmm3 ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm14, %xmm5 ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm2 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[0],zero,xmm2[2,3,4,5],zero,xmm2[7,8,9,10],zero,xmm2[12,13,14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm14[9],zero,zero,zero,zero,xmm14[10],zero,zero,zero,zero,xmm14[11],zero,zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 48(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 32(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm4, (%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 16(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm15, 112(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm1, 96(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 64(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 80(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 128(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 144(%r9) ; AVX1-ONLY-NEXT: vzeroupper ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: store_i8_stride5_vf32: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: vmovdqa (%rdi), 
%ymm3 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm4 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm1 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm2 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm0 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14] ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm6[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm7 = [255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0,255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0] ; AVX2-SLOW-NEXT: # ymm7 = mem[0,1,0,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm7 = ymm3[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15] ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm7 = ymm7[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255,255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255] ; AVX2-SLOW-NEXT: # ymm8 = mem[0,1,0,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u] ; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm6 = ymm0[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX2-SLOW-NEXT: vpblendvb %ymm7, %ymm5, %ymm6, %ymm5 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm7 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm8 ; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1] ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm9 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm10 ; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm10[0],xmm9[0],xmm10[1],xmm9[1],xmm10[2],xmm9[2],xmm10[3],xmm9[3],xmm10[4],xmm9[4],xmm10[5],xmm9[5],xmm10[6],xmm9[6],xmm10[7],xmm9[7] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm11 = xmm11[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm12, %ymm6, %ymm11, %ymm6 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm11 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm12 = xmm11[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[0,0,0,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm13, %ymm6, %ymm12, %ymm6 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm10 = zero,xmm10[6],zero,xmm10[8,u],zero,xmm10[7],zero,xmm10[9],zero,xmm10[11,u],zero,xmm10[10],zero,xmm10[12] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[6],zero,xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[11],zero,xmm9[u,10],zero,xmm9[12],zero ; AVX2-SLOW-NEXT: vpor %xmm10, %xmm9, %xmm9 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = 
ymm9[0,0,1,1] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[8],zero,xmm8[u,7],zero,xmm8[9],zero,xmm8[u],zero,xmm8[u,10],zero,xmm8[12],zero,xmm8[u,11] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm7[8,u],zero,xmm7[7],zero,xmm7[9,u,11,u],zero,xmm7[10],zero,xmm7[12,u],zero ; AVX2-SLOW-NEXT: vpor %xmm7, %xmm8, %xmm7 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm9, %ymm7, %ymm7 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm8 = xmm11[1,1,2,2] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,1,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-SLOW-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm10 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3] ; AVX2-SLOW-NEXT: vpor %ymm9, %ymm10, %ymm9 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0] ; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,u,4,4,4,4] ; AVX2-SLOW-NEXT: vpermd %ymm3, %ymm9, %ymm3 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm9, %ymm3, %ymm4, %ymm3 ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm1[13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,ymm1[18],zero,zero,zero ; AVX2-SLOW-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1 ; 
AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [3,3,3,3,u,4,4,4] ; AVX2-SLOW-NEXT: vpermd %ymm0, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 96(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm5, 128(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm7, 32(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm6, (%r9) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: store_i8_stride5_vf32: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm3 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm4 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm1 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm2 ; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm0 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm6 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm7 ; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm8 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm9 ; AVX2-FAST-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm11, %ymm5, %ymm10, %ymm5 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = [0,0,0,0,0,0,1,1] ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm10, %ymm10 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm11, %ymm5, %ymm10, %ymm5 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm9 = zero,xmm9[6],zero,xmm9[8,u],zero,xmm9[7],zero,xmm9[9],zero,xmm9[11,u],zero,xmm9[10],zero,xmm9[12] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6],zero,xmm8[8],zero,xmm8[u,7],zero,xmm8[9],zero,xmm8[11],zero,xmm8[u,10],zero,xmm8[12],zero ; AVX2-FAST-NEXT: vpor %xmm9, %xmm8, %xmm8 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[u],zero,xmm7[u,10],zero,xmm7[12],zero,xmm7[u,11] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero ; AVX2-FAST-NEXT: vpor %xmm7, %xmm6, %xmm6 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm8, %ymm6, %ymm6 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [1,1,2,2,2,2,2,2] ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm7, %ymm7 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm7 = 
ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0] ; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [4,6,5,5,5,5,4,6] ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm8, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,ymm1[29,26],zero,ymm1[28],zero,ymm1[30],zero,ymm1[28,29],zero,ymm1[31],zero,ymm1[29] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[29,26],zero,ymm3[28],zero,ymm3[26,27,28,29],zero,ymm3[31],zero,ymm3[29,30],zero ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm10 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm9, %ymm10, %ymm9 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u] ; AVX2-FAST-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [6,6,6,6,7,7,7,7] ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm9, %ymm9 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX2-FAST-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,u,4,4,4,4] ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm9, %ymm3 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255] ; AVX2-FAST-NEXT: vpblendvb %ymm9, %ymm3, %ymm4, %ymm3 ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm2 = 
zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm1[13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,ymm1[18],zero,zero,zero ; AVX2-FAST-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255] ; AVX2-FAST-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [3,3,3,3,u,4,4,4] ; AVX2-FAST-NEXT: vpermd %ymm0, %ymm2, %ymm0 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX2-FAST-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vmovdqa %ymm0, 64(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm8, 128(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm7, 96(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm6, 32(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm5, (%r9) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: store_i8_stride5_vf32: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm2 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm6 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm7 ; AVX2-FAST-PERLANE-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm7[0],xmm6[0],xmm7[1],xmm6[1],xmm7[2],xmm6[2],xmm7[3],xmm6[3],xmm7[4],xmm6[4],xmm7[5],xmm6[5],xmm7[6],xmm6[6],xmm7[7],xmm6[7] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm9 ; AVX2-FAST-PERLANE-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm11, %ymm5, %ymm10, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm10 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm11 = xmm10[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,0,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm12, %ymm5, %ymm11, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm9 = zero,xmm9[6],zero,xmm9[8,u],zero,xmm9[7],zero,xmm9[9],zero,xmm9[11,u],zero,xmm9[10],zero,xmm9[12] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6],zero,xmm8[8],zero,xmm8[u,7],zero,xmm8[9],zero,xmm8[11],zero,xmm8[u,10],zero,xmm8[12],zero ; AVX2-FAST-PERLANE-NEXT: vpor %xmm9, %xmm8, %xmm8 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm7 = 
xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[u],zero,xmm7[u,10],zero,xmm7[12],zero,xmm7[u,11] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero ; AVX2-FAST-PERLANE-NEXT: vpor %xmm7, %xmm6, %xmm6 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm8, %ymm6, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm7 = xmm10[1,1,2,2] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,1,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm8, %ymm6, %ymm7, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm7 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[21],zero,ymm3[19,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25],zero,ymm3[23] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[19],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[0,2,1,1,4,6,5,5] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm9, %ymm7, %ymm8, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[27],zero,ymm1[29,26],zero,ymm1[28],zero,ymm1[30],zero,ymm1[28,29],zero,ymm1[31],zero,ymm1[29] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm9 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm9 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[29,26],zero,ymm3[28],zero,ymm3[26,27,28,29],zero,ymm3[31],zero,ymm3[29,30],zero ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm10 = 
ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm9, %ymm10, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[2,2,3,3,6,6,7,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [3,3,3,u,4,4,4,4] ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm3, %ymm9, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm9, %ymm3, %ymm4, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm2 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = zero,zero,zero,ymm1[13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,zero,ymm1[15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,zero,ymm1[17],zero,zero,zero,zero,ymm1[18],zero,zero,zero ; AVX2-FAST-PERLANE-NEXT: vpor %ymm2, %ymm1, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm2, %ymm3, %ymm1, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = [3,3,3,3,u,4,4,4] ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm0, %ymm2, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm2, %ymm1, %ymm0, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 64(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm8, 128(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 96(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm6, 32(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm5, (%r9) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512F-SLOW-LABEL: store_i8_stride5_vf32: ; AVX512F-SLOW: # %bb.0: ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm1 ; AVX512F-SLOW-NEXT: vmovdqa (%rsi), %ymm2 ; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %ymm3 ; AVX512F-SLOW-NEXT: vmovdqa (%rcx), %ymm4 ; AVX512F-SLOW-NEXT: vmovdqa (%r8), %ymm0 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm5 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm5[8],zero,xmm5[u,7],zero,xmm5[9],zero,xmm5[u],zero,xmm5[u,10],zero,xmm5[12],zero,xmm5[u,11] ; AVX512F-SLOW-NEXT: vmovdqa (%rsi), %xmm7 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm7[8,u],zero,xmm7[7],zero,xmm7[9,u,11,u],zero,xmm7[10],zero,xmm7[12,u],zero ; AVX512F-SLOW-NEXT: vpor %xmm6, %xmm8, %xmm6 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1] ; AVX512F-SLOW-NEXT: vmovdqa (%rcx), %xmm8 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = 
zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12] ; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %xmm10 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm11 = xmm10[6],zero,xmm10[8],zero,xmm10[u,7],zero,xmm10[9],zero,xmm10[11],zero,xmm10[u,10],zero,xmm10[12],zero ; AVX512F-SLOW-NEXT: vpor %xmm9, %xmm11, %xmm9 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,0,1,1] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX512F-SLOW-NEXT: vpternlogq $226, %ymm6, %ymm11, %ymm9 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm9, %zmm0, %zmm6 ; AVX512F-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1] ; AVX512F-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm5[0],xmm7[0],xmm5[1],xmm7[1],xmm5[2],xmm7[2],xmm5[3],xmm7[3],xmm5[4],xmm7[4],xmm5[5],xmm7[5],xmm5[6],xmm7[6],xmm5[7],xmm7[7] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX512F-SLOW-NEXT: vpternlogq $226, %ymm8, %ymm7, %ymm5 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm5[0,1,2,3],zmm6[4,5,6,7] ; AVX512F-SLOW-NEXT: vmovdqa (%r8), %xmm6 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm8 = [u,0,0,0,0,u,1,1,1,1,u,2,2,2,2,u] ; AVX512F-SLOW-NEXT: vpermd %zmm6, %zmm8, %zmm6 ; AVX512F-SLOW-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm6 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u],zero,ymm4[13,u,u,u],zero,ymm4[14,u,u,u],zero,ymm4[15,u,u,u],zero,ymm4[16,u,u,u],zero,ymm4[17,u,u,u],zero,ymm4[18,u,u] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm3[u,u,u,13],zero,ymm3[u,u,u,14],zero,ymm3[u,u,u,15],zero,ymm3[u,u,u,16],zero,ymm3[u,u,u,17],zero,ymm3[u,u,u,18],zero,ymm3[u,u] ; AVX512F-SLOW-NEXT: vpor %ymm5, %ymm8, %ymm5 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u],zero,ymm2[13,u,u,u],zero,ymm2[14,u,u,u],zero,ymm2[15,u,u,u],zero,ymm2[16,u,u,u],zero,ymm2[17,u,u,u],zero,ymm2[18,u,u,u],zero ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,13],zero,ymm1[u,u,u,14],zero,ymm1[u,u,u,15],zero,ymm1[u,u,u,16],zero,ymm1[u,u,u,17],zero,ymm1[u,u,u,18],zero,ymm1[u,u,u,19] ; AVX512F-SLOW-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX512F-SLOW-NEXT: vpternlogq $226, %ymm5, %ymm11, %ymm8 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm9 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400] ; AVX512F-SLOW-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm5 
= ymm5[2,2,3,3] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm11 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm3[21],zero,ymm3[21,20],zero,ymm3[22],zero,ymm3[24],zero,ymm3[22,23],zero,ymm3[25] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3] ; AVX512F-SLOW-NEXT: vpor %ymm5, %ymm11, %ymm5 ; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5 ; AVX512F-SLOW-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm0[12],zero,zero,zero,zero,ymm0[13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,zero,ymm0[15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,zero,ymm0[17],zero,zero,zero,zero,ymm0[18],zero ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm0[0,2,1,1,4,6,5,5] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,3,3,2] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX512F-SLOW-NEXT: vpandn %ymm9, %ymm11, %ymm9 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm9, %zmm8, %zmm8 ; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm3[27],zero,zero,ymm3[26],zero,ymm3[28],zero,ymm3[30],zero,zero,ymm3[29],zero,ymm3[31],zero,zero ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX512F-SLOW-NEXT: vpternlogq $248, %ymm10, %ymm4, %ymm3 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1 ; AVX512F-SLOW-NEXT: vpternlogq $184, %ymm3, %ymm7, %ymm1 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0 ; AVX512F-SLOW-NEXT: vmovdqa %ymm0, 128(%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm8, 64(%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm6, (%r9) ; AVX512F-SLOW-NEXT: vzeroupper ; AVX512F-SLOW-NEXT: retq ; ; AVX512F-FAST-LABEL: store_i8_stride5_vf32: ; AVX512F-FAST: # %bb.0: ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512F-FAST-NEXT: vmovdqa (%rsi), %ymm1 ; AVX512F-FAST-NEXT: vmovdqa (%rdx), %ymm2 ; AVX512F-FAST-NEXT: vmovdqa (%rcx), %ymm3 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm4 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm4[8],zero,xmm4[u,7],zero,xmm4[9],zero,xmm4[u],zero,xmm4[u,10],zero,xmm4[12],zero,xmm4[u,11] ; AVX512F-FAST-NEXT: vmovdqa (%rsi), %xmm6 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero ; AVX512F-FAST-NEXT: vpor %xmm5, %xmm7, %xmm5 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[0,0,1,1] ; AVX512F-FAST-NEXT: vmovdqa (%rcx), %xmm7 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm8 = 
zero,xmm7[6],zero,xmm7[8,u],zero,xmm7[7],zero,xmm7[9],zero,xmm7[11,u],zero,xmm7[10],zero,xmm7[12] ; AVX512F-FAST-NEXT: vmovdqa (%rdx), %xmm9 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm10 = xmm9[6],zero,xmm9[8],zero,xmm9[u,7],zero,xmm9[9],zero,xmm9[11],zero,xmm9[u,10],zero,xmm9[12],zero ; AVX512F-FAST-NEXT: vpor %xmm8, %xmm10, %xmm8 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,0,1,1] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX512F-FAST-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm8, %zmm0, %zmm5 ; AVX512F-FAST-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,0,1,1] ; AVX512F-FAST-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3],xmm4[4],xmm6[4],xmm4[5],xmm6[5],xmm4[6],xmm6[6],xmm4[7],xmm6[7] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX512F-FAST-NEXT: vpternlogq $226, %ymm7, %ymm6, %ymm4 ; AVX512F-FAST-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm4[0,1,2,3],zmm5[4,5,6,7] ; AVX512F-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm4 = mem[0,1,2,3,0,1,2,3] ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u] ; AVX512F-FAST-NEXT: vpermd %zmm4, %zmm7, %zmm7 ; AVX512F-FAST-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm7 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u],zero,ymm3[13,u,u,u],zero,ymm3[14,u,u,u],zero,ymm3[15,u,u,u],zero,ymm3[16,u,u,u],zero,ymm3[17,u,u,u],zero,ymm3[18,u,u] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm2[u,u,u,13],zero,ymm2[u,u,u,14],zero,ymm2[u,u,u,15],zero,ymm2[u,u,u,16],zero,ymm2[u,u,u,17],zero,ymm2[u,u,u,18],zero,ymm2[u,u] ; AVX512F-FAST-NEXT: vpor %ymm5, %ymm8, %ymm5 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u],zero,ymm1[13,u,u,u],zero,ymm1[14,u,u,u],zero,ymm1[15,u,u,u],zero,ymm1[16,u,u,u],zero,ymm1[17,u,u,u],zero,ymm1[18,u,u,u],zero ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,13],zero,ymm0[u,u,u,14],zero,ymm0[u,u,u,15],zero,ymm0[u,u,u,16],zero,ymm0[u,u,u,17],zero,ymm0[u,u,u,18],zero,ymm0[u,u,u,19] ; AVX512F-FAST-NEXT: vpor %ymm8, %ymm9, %ymm8 ; AVX512F-FAST-NEXT: vpternlogq $226, %ymm5, %ymm10, %ymm8 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[21],zero,zero,ymm0[20],zero,ymm0[22],zero,ymm0[24],zero,zero,ymm0[23],zero,ymm0[25],zero,zero ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm10 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400] ; AVX512F-FAST-NEXT: vpternlogq $248, %ymm10, %ymm5, %ymm9 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm5 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm3[19],zero,ymm3[21],zero,zero,ymm3[20],zero,ymm3[22],zero,ymm3[24],zero,zero,ymm3[23],zero ; AVX512F-FAST-NEXT: 
vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm11 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm2[21],zero,ymm2[21,20],zero,ymm2[22],zero,ymm2[24],zero,ymm2[22,23],zero,ymm2[25] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3] ; AVX512F-FAST-NEXT: vpor %ymm5, %ymm11, %ymm5 ; AVX512F-FAST-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm9, %ymm5 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm5, %zmm0, %zmm5 ; AVX512F-FAST-NEXT: vshufi64x2 {{.*#+}} zmm5 = zmm8[0,1,2,3],zmm5[4,5,6,7] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [4,u,5,5,5,5,u,6] ; AVX512F-FAST-NEXT: vpermd %ymm4, %ymm8, %ymm8 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX512F-FAST-NEXT: vpandn %ymm8, %ymm9, %ymm8 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm9 = ymm4[12],zero,zero,zero,zero,ymm4[13],zero,zero,zero,zero,ymm4[14],zero,zero,zero,zero,ymm4[15],zero,zero,zero,zero,ymm4[16],zero,zero,zero,zero,ymm4[17],zero,zero,zero,zero,ymm4[18],zero ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8 ; AVX512F-FAST-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm5, %zmm8 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero,zero ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512F-FAST-NEXT: vpternlogq $248, %ymm10, %ymm3, %ymm2 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm0[26],zero,ymm0[28],zero,zero,ymm0[27],zero,ymm0[29],zero,ymm0[31],zero,zero,ymm0[30],zero ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX512F-FAST-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm1, %ymm0 ; AVX512F-FAST-NEXT: vpternlogq $184, %ymm2, %ymm6, %ymm0 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [6,6,6,u,7,7,7,7] ; AVX512F-FAST-NEXT: vpermd %ymm4, %ymm1, %ymm1 ; AVX512F-FAST-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm1 ; AVX512F-FAST-NEXT: vmovdqa %ymm1, 128(%r9) ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, 64(%r9) ; AVX512F-FAST-NEXT: vmovdqa64 %zmm7, (%r9) ; AVX512F-FAST-NEXT: vzeroupper ; AVX512F-FAST-NEXT: retq ; ; AVX512BW-SLOW-LABEL: store_i8_stride5_vf32: ; AVX512BW-SLOW: # %bb.0: ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm4 ; AVX512BW-SLOW-NEXT: vmovdqa (%rsi), %ymm5 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdx), %ymm1 ; AVX512BW-SLOW-NEXT: vmovdqa (%rcx), %ymm2 ; AVX512BW-SLOW-NEXT: vmovdqa (%r8), %ymm0 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %xmm3 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm6 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11] ; AVX512BW-SLOW-NEXT: vmovdqa (%rsi), %xmm7 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = zero,xmm7[8,u],zero,xmm7[7],zero,xmm7[9,u,11,u],zero,xmm7[10],zero,xmm7[12,u],zero ; AVX512BW-SLOW-NEXT: vpor %xmm6, %xmm8, %xmm6 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdx), %xmm8 ; AVX512BW-SLOW-NEXT: vmovdqa (%rcx), %xmm9 ; AVX512BW-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm10 = 
xmm9[0],xmm8[0],xmm9[1],xmm8[1],xmm9[2],xmm8[2],xmm9[3],xmm8[3],xmm9[4],xmm8[4],xmm9[5],xmm8[5],xmm9[6],xmm8[6],xmm9[7],xmm8[7] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm10 = xmm10[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm6, %zmm10, %zmm6 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm9 = zero,xmm9[6],zero,xmm9[8,u],zero,xmm9[7],zero,xmm9[9],zero,xmm9[11,u],zero,xmm9[10],zero,xmm9[12] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm8 = xmm8[6],zero,xmm8[8],zero,xmm8[u,7],zero,xmm8[9],zero,xmm8[11],zero,xmm8[u,10],zero,xmm8[12],zero ; AVX512BW-SLOW-NEXT: vpor %xmm9, %xmm8, %xmm8 ; AVX512BW-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3],xmm3[4],xmm7[4],xmm3[5],xmm7[5],xmm3[6],xmm7[6],xmm3[7],xmm7[7] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm8, %zmm3, %zmm3 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5] ; AVX512BW-SLOW-NEXT: movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1} ; AVX512BW-SLOW-NEXT: vmovdqa (%r8), %xmm6 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2] ; AVX512BW-SLOW-NEXT: vpermd %zmm6, %zmm7, %zmm6 ; AVX512BW-SLOW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1} ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm1[21],zero,ymm1[21,20],zero,ymm1[22],zero,ymm1[24],zero,ymm1[22,23],zero,ymm1[25] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpor %ymm6, %ymm7, %ymm6 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm1[12,13],zero,zero,zero,zero,ymm1[14],zero,zero,zero,ymm1[14,15],zero,zero,zero,zero,ymm1[16],zero,zero,zero,ymm1[16,17],zero,zero,zero,zero,ymm1[18],zero,zero,zero ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero ; AVX512BW-SLOW-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm5[21],zero,zero,ymm5[20],zero,ymm5[22],zero,ymm5[24],zero,zero,ymm5[23],zero,ymm5[25],zero ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm8 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,u,4,4,4,4] ; AVX512BW-SLOW-NEXT: vpermd %ymm4, %ymm8, %ymm8 ; AVX512BW-SLOW-NEXT: movl $138547332, %eax # imm = 0x8421084 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = 
ymm5[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7 ; AVX512BW-SLOW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1} ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,3,u,4,4,4] ; AVX512BW-SLOW-NEXT: vpermd %ymm0, %ymm6, %ymm6 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm8 = ymm0[0,2,1,1,4,6,5,5] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm8, %zmm6, %zmm6 ; AVX512BW-SLOW-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1} ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm5 = ymm5[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512BW-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15] ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: movl $1251232404, %eax # imm = 0x4A944A94 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm4, %ymm5 {%k1} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm5[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,u,27,u,u,26,u,28,u,30,u,u,29,u,31,u] ; AVX512BW-SLOW-NEXT: vpshufhw {{.*#+}} ymm1 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14] ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: movl $693250386, %eax # imm = 0x29522952 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm1, %ymm2 {%k1} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm2[2,2,3,3] ; AVX512BW-SLOW-NEXT: movl $415641996, %eax # imm = 0x18C6318C ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm4, %ymm1 {%k1} ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX512BW-SLOW-NEXT: movl $-2078209982, %eax # imm = 0x84210842 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm0, %ymm1 {%k1} ; AVX512BW-SLOW-NEXT: vmovdqa %ymm1, 128(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm7, 64(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm3, (%r9) ; AVX512BW-SLOW-NEXT: vzeroupper ; AVX512BW-SLOW-NEXT: retq ; ; AVX512BW-FAST-LABEL: store_i8_stride5_vf32: ; AVX512BW-FAST: # %bb.0: ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %ymm1 ; AVX512BW-FAST-NEXT: vmovdqa (%rsi), %ymm4 ; AVX512BW-FAST-NEXT: vmovdqa (%rdx), %ymm0 ; AVX512BW-FAST-NEXT: vmovdqa (%rcx), %ymm2 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm3 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm5 = xmm3[8],zero,xmm3[u,7],zero,xmm3[9],zero,xmm3[u],zero,xmm3[u,10],zero,xmm3[12],zero,xmm3[u,11] ; AVX512BW-FAST-NEXT: vmovdqa (%rsi), %xmm6 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm7 = zero,xmm6[8,u],zero,xmm6[7],zero,xmm6[9,u,11,u],zero,xmm6[10],zero,xmm6[12,u],zero ; AVX512BW-FAST-NEXT: vpor %xmm5, %xmm7, %xmm5 ; AVX512BW-FAST-NEXT: vmovdqa (%rdx), %xmm7 ; AVX512BW-FAST-NEXT: vmovdqa (%rcx), %xmm8 ; AVX512BW-FAST-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm8[0],xmm7[0],xmm8[1],xmm7[1],xmm8[2],xmm7[2],xmm8[3],xmm7[3],xmm8[4],xmm7[4],xmm8[5],xmm7[5],xmm8[6],xmm7[6],xmm8[7],xmm7[7] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm9 = xmm9[2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm5, %zmm9, %zmm5 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5] ; AVX512BW-FAST-NEXT: vpshufb 
{{.*#+}} xmm8 = zero,xmm8[6],zero,xmm8[8,u],zero,xmm8[7],zero,xmm8[9],zero,xmm8[11,u],zero,xmm8[10],zero,xmm8[12] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[6],zero,xmm7[8],zero,xmm7[u,7],zero,xmm7[9],zero,xmm7[11],zero,xmm7[u,10],zero,xmm7[12],zero ; AVX512BW-FAST-NEXT: vpor %xmm7, %xmm8, %xmm7 ; AVX512BW-FAST-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3],xmm3[4],xmm6[4],xmm3[5],xmm6[5],xmm3[6],xmm6[6],xmm3[7],xmm6[7] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm7, %zmm3, %zmm3 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm3 = zmm3[0,0,1,1,4,4,5,5] ; AVX512BW-FAST-NEXT: movabsq $3570337559743967628, %rax # imm = 0x318C631818C6318C ; AVX512BW-FAST-NEXT: kmovq %rax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm5, %zmm3 {%k1} ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm6 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10] ; AVX512BW-FAST-NEXT: vpermd %zmm5, %zmm6, %zmm6 ; AVX512BW-FAST-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210 ; AVX512BW-FAST-NEXT: kmovq %rax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm6, %zmm3 {%k1} ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,18,19],zero,ymm0[21],zero,ymm0[21,20],zero,ymm0[22],zero,ymm0[24],zero,ymm0[22,23],zero,ymm0[25] ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm7 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[19],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-FAST-NEXT: vpor %ymm6, %ymm7, %ymm6 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm7 = zero,zero,ymm0[12,13],zero,zero,zero,zero,ymm0[14],zero,zero,zero,ymm0[14,15],zero,zero,zero,zero,ymm0[16],zero,zero,zero,ymm0[16,17],zero,zero,zero,zero,ymm0[18],zero,zero,zero ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm8 = zero,zero,zero,zero,ymm2[13],zero,zero,zero,zero,ymm2[14],zero,zero,zero,zero,ymm2[15],zero,zero,zero,zero,ymm2[16],zero,zero,zero,zero,ymm2[17],zero,zero,zero,zero,ymm2[18],zero,zero ; AVX512BW-FAST-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm6, %zmm7, %zmm6 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm7 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm8 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX512BW-FAST-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [3,3,3,u,4,4,4,4] ; AVX512BW-FAST-NEXT: vpermd %ymm1, %ymm8, %ymm8 ; AVX512BW-FAST-NEXT: movl $138547332, %eax # imm = 0x8421084 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm8 {%k1} = ymm4[u,u,13,u,u,u,u,14,u,u,u,u,15,u,u,u,u,16,u,u,u,u,17,u,u,u,u,18,u,u,u,u] ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm7, %zmm8, %zmm7 ; AVX512BW-FAST-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318 ; AVX512BW-FAST-NEXT: kmovq %rax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1} ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm6 = [3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14] ; 
AVX512BW-FAST-NEXT: vpermd %zmm5, %zmm6, %zmm6 ; AVX512BW-FAST-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421 ; AVX512BW-FAST-NEXT: kmovq %rax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm6, %zmm7 {%k1} ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm4 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,zero,zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30] ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX512BW-FAST-NEXT: vpor %ymm4, %ymm1, %ymm1 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm2[27],zero,zero,ymm2[26],zero,ymm2[28],zero,ymm2[30],zero,zero,ymm2[29],zero,ymm2[31],zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm0[27],zero,zero,ymm0[26],zero,ymm0[28],zero,ymm0[30],zero,zero,ymm0[29],zero,ymm0[31],zero,zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX512BW-FAST-NEXT: vpor %ymm2, %ymm0, %ymm0 ; AVX512BW-FAST-NEXT: movl $415641996, %eax # imm = 0x18C6318C ; AVX512BW-FAST-NEXT: kmovd %eax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1} ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [6,6,6,6,7,7,7,7] ; AVX512BW-FAST-NEXT: vpermd %ymm5, %ymm1, %ymm1 ; AVX512BW-FAST-NEXT: movl $-2078209982, %eax # imm = 0x84210842 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %ymm1, %ymm0 {%k1} ; AVX512BW-FAST-NEXT: vmovdqa %ymm0, 128(%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm7, 64(%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm3, (%r9) ; AVX512BW-FAST-NEXT: vzeroupper ; AVX512BW-FAST-NEXT: retq %in.vec0 = load <32 x i8>, ptr %in.vecptr0, align 64 %in.vec1 = load <32 x i8>, ptr %in.vecptr1, align 64 %in.vec2 = load <32 x i8>, ptr %in.vecptr2, align 64 %in.vec3 = load <32 x i8>, ptr %in.vecptr3, align 64 %in.vec4 = load <32 x i8>, ptr %in.vecptr4, align 64 %1 = shufflevector <32 x i8> %in.vec0, <32 x i8> %in.vec1, <64 x i32> %2 = shufflevector <32 x i8> %in.vec2, <32 x i8> %in.vec3, <64 x i32> %3 = shufflevector <64 x i8> %1, <64 x i8> %2, <128 x i32> %4 = shufflevector <32 x i8> %in.vec4, <32 x i8> poison, <128 x i32> %5 = shufflevector <128 x i8> %3, <128 x i8> %4, <160 x i32> %interleaved.vec = shufflevector <160 x i8> %5, <160 x i8> poison, <160 x i32> store <160 x i8> %interleaved.vec, ptr %out.vec, align 64 ret void } define void @store_i8_stride5_vf64(ptr %in.vecptr0, ptr %in.vecptr1, ptr %in.vecptr2, ptr %in.vecptr3, ptr %in.vecptr4, ptr %out.vec) nounwind { ; SSE-LABEL: store_i8_stride5_vf64: ; SSE: # %bb.0: ; SSE-NEXT: subq $504, %rsp # imm = 0x1F8 ; SSE-NEXT: movdqa (%rdi), %xmm7 ; SSE-NEXT: movdqa (%rsi), %xmm9 ; SSE-NEXT: movdqa 16(%rsi), %xmm14 ; SSE-NEXT: movdqa (%rdx), %xmm0 ; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill ; SSE-NEXT: movdqa 16(%rdx), %xmm11 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa (%rcx), %xmm10 ; SSE-NEXT: movdqa 16(%rcx), %xmm6 ; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa (%r8), %xmm13 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa {{.*#+}} xmm12 = 
[255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; SSE-NEXT: pand %xmm12, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[2,1,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm12, %xmm4 ; SSE-NEXT: pandn %xmm1, %xmm4 ; SSE-NEXT: por %xmm0, %xmm4 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: pand %xmm8, %xmm4 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm7[1,1,2,2] ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; SSE-NEXT: pand %xmm2, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,0] ; SSE-NEXT: movdqa %xmm2, %xmm5 ; SSE-NEXT: pandn %xmm1, %xmm5 ; SSE-NEXT: por %xmm0, %xmm5 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: por %xmm4, %xmm0 ; SSE-NEXT: movdqa {{.*#+}} xmm15 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; SSE-NEXT: pand %xmm15, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[1,1,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm3 ; SSE-NEXT: pandn %xmm1, %xmm3 ; SSE-NEXT: por %xmm0, %xmm3 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm11[3,3,3,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: pand %xmm12, %xmm0 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm6[2,1,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm12, %xmm5 ; SSE-NEXT: pandn %xmm1, %xmm5 ; SSE-NEXT: por %xmm0, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,0] ; SSE-NEXT: movdqa %xmm2, %xmm1 ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: movdqa 16(%rdi), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] ; SSE-NEXT: pand %xmm2, %xmm0 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: pand %xmm8, %xmm5 ; SSE-NEXT: por %xmm5, %xmm0 ; SSE-NEXT: movdqa 16(%r8), %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm3 ; SSE-NEXT: pandn %xmm1, %xmm3 ; SSE-NEXT: pand %xmm15, %xmm0 ; SSE-NEXT: por %xmm0, %xmm3 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 32(%rcx), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm12, %xmm1 ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: movdqa 32(%rdx), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; 
SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: pand %xmm12, %xmm0 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa 32(%rsi), %xmm11 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm11[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,0] ; SSE-NEXT: movdqa %xmm2, %xmm5 ; SSE-NEXT: pandn %xmm0, %xmm5 ; SSE-NEXT: movdqa 32(%rdi), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] ; SSE-NEXT: pand %xmm2, %xmm0 ; SSE-NEXT: por %xmm0, %xmm5 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm8, %xmm1 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: movdqa 32(%r8), %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm3 ; SSE-NEXT: pandn %xmm1, %xmm3 ; SSE-NEXT: pand %xmm15, %xmm0 ; SSE-NEXT: por %xmm0, %xmm3 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 48(%rcx), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[3,3,0,3] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: movdqa %xmm12, %xmm1 ; SSE-NEXT: pandn %xmm0, %xmm1 ; SSE-NEXT: movdqa 48(%rdx), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,3,3,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,4,4,4] ; SSE-NEXT: pand %xmm12, %xmm0 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa 48(%rsi), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,7,6,7] ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,0] ; SSE-NEXT: movdqa %xmm2, %xmm5 ; SSE-NEXT: pandn %xmm0, %xmm5 ; SSE-NEXT: movdqa 48(%rdi), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,2,2] ; SSE-NEXT: pand %xmm2, %xmm0 ; SSE-NEXT: por %xmm0, %xmm5 ; SSE-NEXT: movdqa %xmm8, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm8, %xmm1 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: movdqa 48(%r8), %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm5 ; SSE-NEXT: pandn %xmm1, %xmm5 ; SSE-NEXT: pand %xmm15, %xmm0 ; SSE-NEXT: por %xmm0, %xmm5 ; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa %xmm10, %xmm0 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm10[0],xmm0[1],xmm10[1],xmm0[2],xmm10[2],xmm0[3],xmm10[3],xmm0[4],xmm10[4],xmm0[5],xmm10[5],xmm0[6],xmm10[6],xmm0[7],xmm10[7] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm15, %xmm1 ; SSE-NEXT: pandn 
%xmm0, %xmm1 ; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm3[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0] ; SSE-NEXT: pand %xmm15, %xmm0 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa {{.*#+}} xmm6 = [255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; SSE-NEXT: movdqa %xmm6, %xmm0 ; SSE-NEXT: pandn %xmm1, %xmm0 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[1,1,1,1] ; SSE-NEXT: movdqa %xmm2, %xmm5 ; SSE-NEXT: pandn %xmm1, %xmm5 ; SSE-NEXT: movdqa %xmm9, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm9[0],xmm1[1],xmm9[1],xmm1[2],xmm9[2],xmm1[3],xmm9[3],xmm1[4],xmm9[4],xmm1[5],xmm9[5],xmm1[6],xmm9[6],xmm1[7],xmm9[7] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm2, %xmm1 ; SSE-NEXT: por %xmm5, %xmm1 ; SSE-NEXT: pand %xmm6, %xmm1 ; SSE-NEXT: por %xmm0, %xmm1 ; SSE-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[0,0,1,1] ; SSE-NEXT: movdqa %xmm4, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm4, %xmm1 ; SSE-NEXT: por %xmm1, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm9[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa %xmm4, %xmm5 ; SSE-NEXT: pandn %xmm1, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,2,3,3] ; SSE-NEXT: pand %xmm4, %xmm1 ; SSE-NEXT: por %xmm1, %xmm5 ; SSE-NEXT: movdqa {{.*#+}} xmm8 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; SSE-NEXT: movdqa %xmm8, %xmm7 ; SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,5,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,2,2,2] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: pandn %xmm5, %xmm9 ; SSE-NEXT: punpckhbw {{.*#+}} xmm10 = xmm10[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm10[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm2, %xmm5 ; SSE-NEXT: por %xmm9, %xmm5 ; SSE-NEXT: pand %xmm8, %xmm5 ; SSE-NEXT: por %xmm7, %xmm5 ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[2,2,2,2] ; SSE-NEXT: movdqa %xmm12, %xmm0 ; SSE-NEXT: pandn %xmm7, %xmm0 ; SSE-NEXT: pand %xmm12, %xmm5 ; SSE-NEXT: por %xmm5, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload ; SSE-NEXT: movdqa %xmm13, %xmm0 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm13[0],xmm0[1],xmm13[1],xmm0[2],xmm13[2],xmm0[3],xmm13[3],xmm0[4],xmm13[4],xmm0[5],xmm13[5],xmm0[6],xmm13[6],xmm0[7],xmm13[7] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm15, %xmm7 ; 
SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] ; SSE-NEXT: pand %xmm15, %xmm5 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa %xmm6, %xmm5 ; SSE-NEXT: pandn %xmm7, %xmm5 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[1,1,1,1] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: pandn %xmm7, %xmm9 ; SSE-NEXT: movdqa %xmm14, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm14[0],xmm1[1],xmm14[1],xmm1[2],xmm14[2],xmm1[3],xmm14[3],xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm2, %xmm7 ; SSE-NEXT: por %xmm9, %xmm7 ; SSE-NEXT: pand %xmm6, %xmm7 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,0,1,1] ; SSE-NEXT: movdqa %xmm4, %xmm3 ; SSE-NEXT: pandn %xmm5, %xmm3 ; SSE-NEXT: pand %xmm4, %xmm7 ; SSE-NEXT: por %xmm7, %xmm3 ; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: punpckhbw {{.*#+}} xmm14 = xmm14[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm14[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa %xmm4, %xmm7 ; SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,2,3,3] ; SSE-NEXT: pand %xmm4, %xmm5 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa %xmm8, %xmm5 ; SSE-NEXT: pandn %xmm7, %xmm5 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm10[0,1,2,3,5,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,2,2,2] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: pandn %xmm7, %xmm9 ; SSE-NEXT: movdqa %xmm13, %xmm0 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm13[8],xmm0[9],xmm13[9],xmm0[10],xmm13[10],xmm0[11],xmm13[11],xmm0[12],xmm13[12],xmm0[13],xmm13[13],xmm0[14],xmm13[14],xmm0[15],xmm13[15] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm0[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm2, %xmm7 ; SSE-NEXT: por %xmm9, %xmm7 ; SSE-NEXT: pand %xmm8, %xmm7 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,2,2,2] ; SSE-NEXT: movdqa %xmm12, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm12, %xmm7 ; SSE-NEXT: por %xmm7, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload ; SSE-NEXT: movdqa %xmm3, %xmm0 ; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm15, %xmm7 ; SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload ; SSE-NEXT: 
pshuflw {{.*#+}} xmm5 = xmm10[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] ; SSE-NEXT: pand %xmm15, %xmm5 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa %xmm6, %xmm5 ; SSE-NEXT: pandn %xmm7, %xmm5 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[1,1,1,1] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: pandn %xmm7, %xmm9 ; SSE-NEXT: movdqa %xmm11, %xmm14 ; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm11[0],xmm14[1],xmm11[1],xmm14[2],xmm11[2],xmm14[3],xmm11[3],xmm14[4],xmm11[4],xmm14[5],xmm11[5],xmm14[6],xmm11[6],xmm14[7],xmm11[7] ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm14[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm2, %xmm7 ; SSE-NEXT: por %xmm9, %xmm7 ; SSE-NEXT: pand %xmm6, %xmm7 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,1,1] ; SSE-NEXT: movdqa %xmm4, %xmm1 ; SSE-NEXT: pandn %xmm5, %xmm1 ; SSE-NEXT: pand %xmm4, %xmm7 ; SSE-NEXT: por %xmm7, %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa %xmm4, %xmm7 ; SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[2,2,3,3] ; SSE-NEXT: pand %xmm4, %xmm5 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa %xmm8, %xmm5 ; SSE-NEXT: pandn %xmm7, %xmm5 ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm10[0,1,2,3,5,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,2,2,2] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: pandn %xmm7, %xmm9 ; SSE-NEXT: movdqa %xmm3, %xmm1 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm2, %xmm7 ; SSE-NEXT: por %xmm9, %xmm7 ; SSE-NEXT: pand %xmm8, %xmm7 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[2,2,2,2] ; SSE-NEXT: movdqa %xmm12, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm12, %xmm7 ; SSE-NEXT: por %xmm7, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movdqa %xmm0, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,3,2,0,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5] ; SSE-NEXT: movdqa %xmm15, %xmm7 ; SSE-NEXT: pandn %xmm5, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm10[1,2,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] ; SSE-NEXT: pand %xmm15, %xmm5 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: 
movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[1,1,1,1] ; SSE-NEXT: movdqa %xmm2, %xmm9 ; SSE-NEXT: pandn %xmm5, %xmm9 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload ; SSE-NEXT: movdqa %xmm3, %xmm1 ; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,1,2,3] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,0,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,6,6] ; SSE-NEXT: pand %xmm2, %xmm5 ; SSE-NEXT: por %xmm9, %xmm5 ; SSE-NEXT: pand %xmm6, %xmm5 ; SSE-NEXT: pandn %xmm7, %xmm6 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm9[0,0,1,1] ; SSE-NEXT: movdqa %xmm4, %xmm1 ; SSE-NEXT: pandn %xmm5, %xmm1 ; SSE-NEXT: pand %xmm4, %xmm6 ; SSE-NEXT: por %xmm6, %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa %xmm3, %xmm1 ; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm3[8],xmm1[9],xmm3[9],xmm1[10],xmm3[10],xmm1[11],xmm3[11],xmm1[12],xmm3[12],xmm1[13],xmm3[13],xmm1[14],xmm3[14],xmm1[15],xmm3[15] ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[2,2,2,2,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,5,4,7] ; SSE-NEXT: movdqa %xmm4, %xmm6 ; SSE-NEXT: pandn %xmm5, %xmm6 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[2,2,3,3] ; SSE-NEXT: pand %xmm4, %xmm5 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: movdqa %xmm8, %xmm5 ; SSE-NEXT: pandn %xmm6, %xmm5 ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm10[0,1,2,3,5,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,2,2,2] ; SSE-NEXT: movdqa %xmm2, %xmm7 ; SSE-NEXT: pandn %xmm6, %xmm7 ; SSE-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15] ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,1,2,1] ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,1,2,3,4,5,6,7] ; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,6,4] ; SSE-NEXT: pand %xmm2, %xmm6 ; SSE-NEXT: por %xmm7, %xmm6 ; SSE-NEXT: pand %xmm8, %xmm6 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm9[2,2,2,2] ; SSE-NEXT: movdqa %xmm12, %xmm0 ; SSE-NEXT: pandn %xmm5, %xmm0 ; SSE-NEXT: pand %xmm12, %xmm6 ; SSE-NEXT: por %xmm6, %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshuflw $225, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload ; SSE-NEXT: # xmm5 = mem[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,1] ; SSE-NEXT: movdqa %xmm4, %xmm6 ; SSE-NEXT: pandn %xmm5, %xmm6 ; SSE-NEXT: movdqa (%rsp), %xmm3 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm3[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] ; SSE-NEXT: pand %xmm4, %xmm5 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: movdqa %xmm8, %xmm5 ; SSE-NEXT: pandn %xmm6, %xmm5 ; SSE-NEXT: pshuflw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload ; SSE-NEXT: # xmm6 = mem[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,1,3] ; SSE-NEXT: movdqa %xmm12, %xmm7 ; SSE-NEXT: pandn %xmm6, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[0,0,0,0] ; SSE-NEXT: pand %xmm12, %xmm6 ; 
SSE-NEXT: por %xmm6, %xmm7 ; SSE-NEXT: pand %xmm8, %xmm7 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,0,0] ; SSE-NEXT: movdqa %xmm2, %xmm1 ; SSE-NEXT: pandn %xmm5, %xmm1 ; SSE-NEXT: pand %xmm2, %xmm7 ; SSE-NEXT: por %xmm7, %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload ; SSE-NEXT: # xmm5 = mem[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,2,3,2] ; SSE-NEXT: movdqa %xmm2, %xmm6 ; SSE-NEXT: pandn %xmm5, %xmm6 ; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm3[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,2,2,2] ; SSE-NEXT: pand %xmm2, %xmm5 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: movdqa %xmm1, %xmm5 ; SSE-NEXT: pandn %xmm6, %xmm5 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload ; SSE-NEXT: # xmm6 = mem[0,1,2,3,7,5,6,6] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm7 ; SSE-NEXT: pandn %xmm6, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm9[3,3,3,3] ; SSE-NEXT: pand %xmm15, %xmm6 ; SSE-NEXT: por %xmm6, %xmm7 ; SSE-NEXT: pand %xmm1, %xmm7 ; SSE-NEXT: por %xmm5, %xmm7 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[3,3,3,3] ; SSE-NEXT: movdqa {{.*#+}} xmm9 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; SSE-NEXT: movdqa %xmm9, %xmm13 ; SSE-NEXT: pandn %xmm5, %xmm13 ; SSE-NEXT: pand %xmm9, %xmm7 ; SSE-NEXT: por %xmm7, %xmm13 ; SSE-NEXT: pshuflw $225, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload ; SSE-NEXT: # xmm5 = mem[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,1] ; SSE-NEXT: movdqa %xmm4, %xmm6 ; SSE-NEXT: pandn %xmm5, %xmm6 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm11[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,0,0,0] ; SSE-NEXT: pand %xmm4, %xmm5 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: movdqa %xmm8, %xmm5 ; SSE-NEXT: pandn %xmm6, %xmm5 ; SSE-NEXT: pshuflw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload ; SSE-NEXT: # xmm6 = mem[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,1,3] ; SSE-NEXT: movdqa %xmm12, %xmm3 ; SSE-NEXT: pandn %xmm6, %xmm3 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,0,0,0] ; SSE-NEXT: pand %xmm12, %xmm6 ; SSE-NEXT: por %xmm6, %xmm3 ; SSE-NEXT: pand %xmm8, %xmm3 ; SSE-NEXT: por %xmm5, %xmm3 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm0[0,0,0,0] ; SSE-NEXT: movdqa %xmm2, %xmm10 ; SSE-NEXT: pandn %xmm5, %xmm10 ; SSE-NEXT: pand %xmm2, %xmm3 ; SSE-NEXT: por %xmm3, %xmm10 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload ; SSE-NEXT: # xmm3 = mem[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,3,2] ; SSE-NEXT: movdqa %xmm2, %xmm5 ; SSE-NEXT: pandn %xmm3, %xmm5 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm11[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,2,2] ; SSE-NEXT: pand %xmm2, %xmm3 ; SSE-NEXT: por %xmm3, %xmm5 ; SSE-NEXT: movdqa {{.*#+}} xmm7 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: movdqa %xmm7, %xmm3 ; SSE-NEXT: pandn %xmm5, %xmm3 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload ; SSE-NEXT: # xmm5 = mem[0,1,2,3,7,5,6,6] ; 
SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm6 ; SSE-NEXT: pandn %xmm5, %xmm6 ; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[3,3,3,3] ; SSE-NEXT: pand %xmm15, %xmm5 ; SSE-NEXT: por %xmm5, %xmm6 ; SSE-NEXT: pand %xmm7, %xmm6 ; SSE-NEXT: por %xmm3, %xmm6 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[3,3,3,3] ; SSE-NEXT: movdqa %xmm9, %xmm7 ; SSE-NEXT: pandn %xmm3, %xmm7 ; SSE-NEXT: pand %xmm9, %xmm6 ; SSE-NEXT: por %xmm6, %xmm7 ; SSE-NEXT: pshuflw $225, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload ; SSE-NEXT: # xmm3 = mem[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,1] ; SSE-NEXT: movdqa %xmm4, %xmm6 ; SSE-NEXT: pandn %xmm3, %xmm6 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-NEXT: pand %xmm4, %xmm3 ; SSE-NEXT: por %xmm3, %xmm6 ; SSE-NEXT: movdqa %xmm8, %xmm3 ; SSE-NEXT: pandn %xmm6, %xmm3 ; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm14[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,0,1,3] ; SSE-NEXT: movdqa %xmm12, %xmm11 ; SSE-NEXT: pandn %xmm6, %xmm11 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[0,0,0,0] ; SSE-NEXT: pand %xmm12, %xmm6 ; SSE-NEXT: por %xmm6, %xmm11 ; SSE-NEXT: pand %xmm8, %xmm11 ; SSE-NEXT: por %xmm3, %xmm11 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[0,0,0,0] ; SSE-NEXT: movdqa %xmm2, %xmm14 ; SSE-NEXT: pandn %xmm3, %xmm14 ; SSE-NEXT: pand %xmm2, %xmm11 ; SSE-NEXT: por %xmm11, %xmm14 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload ; SSE-NEXT: # xmm3 = mem[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,3,2] ; SSE-NEXT: movdqa %xmm2, %xmm6 ; SSE-NEXT: pandn %xmm3, %xmm6 ; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,2,2,2] ; SSE-NEXT: pand %xmm2, %xmm3 ; SSE-NEXT: por %xmm3, %xmm6 ; SSE-NEXT: movdqa {{.*#+}} xmm5 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: movdqa %xmm5, %xmm3 ; SSE-NEXT: pandn %xmm6, %xmm3 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload ; SSE-NEXT: # xmm6 = mem[0,1,2,3,7,5,6,6] ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[2,3,2,2] ; SSE-NEXT: movdqa %xmm15, %xmm11 ; SSE-NEXT: pandn %xmm6, %xmm11 ; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm0[3,3,3,3] ; SSE-NEXT: pand %xmm15, %xmm6 ; SSE-NEXT: por %xmm6, %xmm11 ; SSE-NEXT: pand %xmm5, %xmm11 ; SSE-NEXT: por %xmm3, %xmm11 ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm1[3,3,3,3] ; SSE-NEXT: movdqa %xmm9, %xmm6 ; SSE-NEXT: pandn %xmm3, %xmm6 ; SSE-NEXT: pand %xmm9, %xmm11 ; SSE-NEXT: por %xmm11, %xmm6 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,0,0,0] ; SSE-NEXT: pand %xmm4, %xmm3 ; SSE-NEXT: pshuflw $225, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload ; SSE-NEXT: # xmm11 = mem[1,0,2,3,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,1,0,1] ; SSE-NEXT: pandn %xmm11, %xmm4 ; SSE-NEXT: por %xmm3, %xmm4 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm5[0,0,0,0] ; SSE-NEXT: pand %xmm12, %xmm3 ; SSE-NEXT: pshuflw $164, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload ; SSE-NEXT: # xmm11 = mem[0,1,2,2,4,5,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm11[0,0,1,3] ; 
SSE-NEXT: pandn %xmm11, %xmm12 ; SSE-NEXT: por %xmm3, %xmm12 ; SSE-NEXT: pand %xmm8, %xmm12 ; SSE-NEXT: pandn %xmm4, %xmm8 ; SSE-NEXT: por %xmm12, %xmm8 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,0,0,0] ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: pandn %xmm3, %xmm0 ; SSE-NEXT: pand %xmm2, %xmm8 ; SSE-NEXT: por %xmm8, %xmm0 ; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,7,6,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,2,2,2] ; SSE-NEXT: pand %xmm2, %xmm1 ; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload ; SSE-NEXT: # xmm3 = mem[0,1,2,3,5,7,6,7] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,2,3,2] ; SSE-NEXT: pandn %xmm3, %xmm2 ; SSE-NEXT: por %xmm1, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[3,3,3,3] ; SSE-NEXT: pand %xmm15, %xmm1 ; SSE-NEXT: pshufhw $167, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload ; SSE-NEXT: # xmm3 = mem[0,1,2,3,7,5,6,6] ; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[2,3,2,2] ; SSE-NEXT: pandn %xmm3, %xmm15 ; SSE-NEXT: por %xmm1, %xmm15 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255] ; SSE-NEXT: pand %xmm1, %xmm15 ; SSE-NEXT: pandn %xmm2, %xmm1 ; SSE-NEXT: por %xmm15, %xmm1 ; SSE-NEXT: pand %xmm9, %xmm1 ; SSE-NEXT: movdqa %xmm1, %xmm2 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm4[3,3,3,3] ; SSE-NEXT: pandn %xmm1, %xmm9 ; SSE-NEXT: por %xmm2, %xmm9 ; SSE-NEXT: movdqa %xmm9, 304(%r9) ; SSE-NEXT: movdqa %xmm0, 240(%r9) ; SSE-NEXT: movdqa %xmm6, 224(%r9) ; SSE-NEXT: movdqa %xmm14, 160(%r9) ; SSE-NEXT: movdqa %xmm7, 144(%r9) ; SSE-NEXT: movdqa %xmm10, 80(%r9) ; SSE-NEXT: movdqa %xmm13, 64(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, (%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 288(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 256(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 208(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 176(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 128(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 96(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 48(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 16(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 272(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 192(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 112(%r9) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 32(%r9) ; SSE-NEXT: addq $504, %rsp # imm = 0x1F8 ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: store_i8_stride5_vf64: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: subq $104, %rsp ; AVX1-ONLY-NEXT: vmovdqa 48(%rcx), %xmm0 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm1 = [128,6,128,8,0,128,7,128,128,6,128,8,0,128,7,128] ; AVX1-ONLY-NEXT: # xmm1 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm0, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, %xmm14 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdx), %xmm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = 
xmm1[6],zero,xmm1[u,u,u,7],zero,xmm1[u,u,u,8],zero,xmm1[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm9[u,6,7,u,u,u,8,9,u,u,u,10,11,u,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm5 = [255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0] ; AVX1-ONLY-NEXT: vandnps %ymm3, %ymm5, %ymm4 ; AVX1-ONLY-NEXT: vmovaps %ymm5, %ymm2 ; AVX1-ONLY-NEXT: vmovdqa 48(%rsi), %xmm3 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm5 = [128,8,0,128,7,128,9,0,128,8,0,128,7,128,9,0] ; AVX1-ONLY-NEXT: # xmm5 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm6 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, %xmm10 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm5 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm8 = [8,128,0,7,128,9,128,0,8,128,0,7,128,9,128,0] ; AVX1-ONLY-NEXT: # xmm8 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm5, %xmm7 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, %xmm12 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm7, %xmm6 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm7 = xmm7[6,u,u,u,9,8,u,u,u,11,10,u,u,u,13,12] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm7, %ymm6 ; AVX1-ONLY-NEXT: vandps %ymm2, %ymm6, %ymm6 ; AVX1-ONLY-NEXT: vorps %ymm4, %ymm6, %ymm4 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm4, %xmm6 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1],zero,xmm6[3,4,5,6],zero,xmm6[8,9,10,11],zero,xmm6[13,14,15] ; AVX1-ONLY-NEXT: vmovdqa 48(%r8), %xmm7 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm8 = zero,zero,xmm7[6],zero,zero,zero,zero,xmm7[7],zero,zero,zero,zero,xmm7[8],zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm8, %xmm6 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,2],zero,xmm4[4,5,6,7],zero,xmm4[9,10,11,12],zero,xmm4[14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,xmm7[3],zero,zero,zero,zero,xmm7[4],zero,zero,zero,zero,xmm7[5],zero,zero ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm5[8],xmm3[8],xmm5[9],xmm3[9],xmm5[10],xmm3[10],xmm5[11],xmm3[11],xmm5[12],xmm3[12],xmm5[13],xmm3[13],xmm5[14],xmm3[14],xmm5[15],xmm3[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm4[u,10,11,u,u,u,12,13,u,u,u,14,15,u,u,u] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm8 = [7,0,4,5,8,9,0,6,7,0,4,5,8,9,0,6] ; AVX1-ONLY-NEXT: # xmm8 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm4, %ymm4 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm1[8],xmm0[8],xmm1[9],xmm0[9],xmm1[10],xmm0[10],xmm1[11],xmm0[11],xmm1[12],xmm0[12],xmm1[13],xmm0[13],xmm1[14],xmm0[14],xmm1[15],xmm0[15] ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm6[u,u,u,10,11,u,u,u,12,13,u,u,u,14,15,u] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [2,7,6,0,5,4,9,8,2,7,6,0,5,4,9,8] ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, %xmm15 ; 
AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm11 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm4, %ymm11, %ymm1 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm11, %ymm0 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = zero,xmm1[1,2,3,4],zero,xmm1[6,7,8,9],zero,xmm1[11,12,13,14],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm7[12],zero,zero,zero,zero,xmm7[13],zero,zero,zero,zero,xmm7[14],zero,zero,zero,zero,xmm7[15] ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm13 = [0,128,2,3,4,5,128,7,8,9,10,128,12,13,14,15] ; AVX1-ONLY-NEXT: vpshufb %xmm13, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm2 = [128,9,128,128,128,128,10,128,128,128,128,11,128,128,128,128] ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm7, %xmm1 ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 32(%rsi), %xmm0 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm4 ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm0, %xmm1 ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm4, %xmm6 ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm6, %xmm1 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm12 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15] ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm12, %xmm6 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm1, %ymm6 ; AVX1-ONLY-NEXT: vmovdqa 32(%rcx), %xmm1 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdx), %xmm8 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm10 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = xmm8[6],zero,xmm8[u,u,u,7],zero,xmm8[u,u,u,8],zero,xmm8[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm10, %xmm14, %xmm10 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm14 = xmm1[8],xmm8[8],xmm1[9],xmm8[9],xmm1[10],xmm8[10],xmm1[11],xmm8[11],xmm1[12],xmm8[12],xmm1[13],xmm8[13],xmm1[14],xmm8[14],xmm1[15],xmm8[15] ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm14, %xmm14 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm14, %ymm10, %ymm14 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm10 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm6, %ymm10, %ymm6 ; AVX1-ONLY-NEXT: vandps %ymm10, %ymm14, %ymm14 ; AVX1-ONLY-NEXT: vorps %ymm6, %ymm14, %ymm14 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm14, %xmm6 ; AVX1-ONLY-NEXT: vpshufb %xmm13, %xmm6, %xmm15 ; AVX1-ONLY-NEXT: vmovdqa 32(%r8), %xmm6 ; AVX1-ONLY-NEXT: vpshufb %xmm2, %xmm6, %xmm13 ; AVX1-ONLY-NEXT: vpor %xmm13, %xmm15, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = xmm14[0,1],zero,xmm14[3,4,5,6],zero,xmm14[8,9,10,11],zero,xmm14[13,14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,xmm6[6],zero,zero,zero,zero,xmm6[7],zero,zero,zero,zero,xmm6[8],zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm14, %xmm13, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, (%rsp) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm14 = [3,0,0,1,4,5,0,2,3,0,0,1,4,5,0,2] ; AVX1-ONLY-NEXT: # xmm14 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm9, %xmm2 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm13 = xmm8[8],xmm1[8],xmm8[9],xmm1[9],xmm8[10],xmm1[10],xmm8[11],xmm1[11],xmm8[12],xmm1[12],xmm8[13],xmm1[13],xmm8[14],xmm1[14],xmm8[15],xmm1[15] ; AVX1-ONLY-NEXT: vpshufb 
{{.*#+}} xmm13 = xmm13[u,u,u,10,11,u,u,u,12,13,u,u,u,14,15,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm13, %ymm2 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm13 = [0,1,4,5,0,2,3,6,0,1,4,5,0,2,3,6] ; AVX1-ONLY-NEXT: # xmm13 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm13, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm9 = [0,10,11,14,15,0,12,13,0,10,11,14,15,0,12,13] ; AVX1-ONLY-NEXT: # xmm9 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm12, %xmm5 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm5, %ymm3 ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm10, %ymm2 ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm10, %ymm3 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm3, %ymm2 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm3 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = [0,1,2,3,128,5,6,7,8,128,10,11,12,13,128,15] ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, %xmm12 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = [128,128,128,128,0,128,128,128,128,1,128,128,128,128,2,128] ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm7, %xmm5 ; AVX1-ONLY-NEXT: vmovdqa %xmm10, %xmm7 ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm5 = [128,1,2,3,4,128,6,7,8,9,128,11,12,13,14,128] ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm10 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15] ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm6, %xmm3 ; AVX1-ONLY-NEXT: vpor %xmm3, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm1 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm1[u,6,7,u,u,u,8,9,u,u,u,10,11,u,u,u] ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3],xmm0[4],xmm4[4],xmm0[5],xmm4[5],xmm0[6],xmm4[6],xmm0[7],xmm4[7] ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3],xmm4[4],xmm0[4],xmm4[5],xmm0[5],xmm4[6],xmm0[6],xmm4[7],xmm0[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[6,u,u,u,9,8,u,u,u,11,10,u,u,u,13,12] ; AVX1-ONLY-NEXT: vpshufb %xmm13, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm11, %ymm1 ; AVX1-ONLY-NEXT: vandps %ymm0, %ymm11, %ymm0 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm0, %xmm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm1 = xmm1[0,1,2],zero,xmm1[4,5,6,7],zero,xmm1[9,10,11,12],zero,xmm1[14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = zero,zero,zero,xmm6[3],zero,zero,zero,zero,xmm6[4],zero,zero,zero,zero,xmm6[5],zero,zero ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpshufb %xmm7, %xmm6, %xmm1 ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 16(%rsi), %xmm8 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm6 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm0 = 
xmm6[8],xmm8[8],xmm6[9],xmm8[9],xmm6[10],xmm8[10],xmm6[11],xmm8[11],xmm6[12],xmm8[12],xmm6[13],xmm8[13],xmm6[14],xmm8[14],xmm6[15],xmm8[15] ; AVX1-ONLY-NEXT: vpshufb %xmm9, %xmm0, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa %xmm9, %xmm15 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[u,u,4,5,u,u,u,6,7,u,u,u,8,9,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0 ; AVX1-ONLY-NEXT: vmovdqa 16(%rcx), %xmm1 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdx), %xmm2 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm2[8],xmm1[8],xmm2[9],xmm1[9],xmm2[10],xmm1[10],xmm2[11],xmm1[11],xmm2[12],xmm1[12],xmm2[13],xmm1[13],xmm2[14],xmm1[14],xmm2[15],xmm1[15] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm12 = [12,13,0,10,11,14,15,0,12,13,0,10,11,14,15,0] ; AVX1-ONLY-NEXT: # xmm12 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm4 = xmm1[8],xmm2[8],xmm1[9],xmm2[9],xmm1[10],xmm2[10],xmm1[11],xmm2[11],xmm1[12],xmm2[12],xmm1[13],xmm2[13],xmm1[14],xmm2[14],xmm1[15],xmm2[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[2,u,u,u,5,4,u,u,u,7,6,u,u,u,9,8] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm3, %ymm4, %ymm3 ; AVX1-ONLY-NEXT: vandnps %ymm0, %ymm11, %ymm0 ; AVX1-ONLY-NEXT: vandps %ymm3, %ymm11, %ymm3 ; AVX1-ONLY-NEXT: vorps %ymm0, %ymm3, %ymm3 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm0 ; AVX1-ONLY-NEXT: vpshufb %xmm5, %xmm0, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa %xmm5, %xmm11 ; AVX1-ONLY-NEXT: vmovdqa 16(%r8), %xmm0 ; AVX1-ONLY-NEXT: vpshufb %xmm10, %xmm0, %xmm7 ; AVX1-ONLY-NEXT: vpor %xmm7, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[0],zero,xmm3[2,3,4,5],zero,xmm3[7,8,9,10],zero,xmm3[12,13,14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = zero,xmm0[9],zero,zero,zero,zero,xmm0[10],zero,zero,zero,zero,xmm0[11],zero,zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa (%rcx), %xmm9 ; AVX1-ONLY-NEXT: vmovdqa (%rdx), %xmm7 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm3 = xmm7[8],xmm9[8],xmm7[9],xmm9[9],xmm7[10],xmm9[10],xmm7[11],xmm9[11],xmm7[12],xmm9[12],xmm7[13],xmm9[13],xmm7[14],xmm9[14],xmm7[15],xmm9[15] ; AVX1-ONLY-NEXT: vpshufb %xmm12, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7] ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm4, %xmm5 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm12 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm6[0],xmm8[0],xmm6[1],xmm8[1],xmm6[2],xmm8[2],xmm6[3],xmm8[3],xmm6[4],xmm8[4],xmm6[5],xmm8[5],xmm6[6],xmm8[6],xmm6[7],xmm8[7] ; AVX1-ONLY-NEXT: vpshufb %xmm13, %xmm3, %xmm13 ; AVX1-ONLY-NEXT: vmovdqa (%rsi), %xmm5 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm3 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm10 = xmm3[8],xmm5[8],xmm3[9],xmm5[9],xmm3[10],xmm5[10],xmm3[11],xmm5[11],xmm3[12],xmm5[12],xmm3[13],xmm5[13],xmm3[14],xmm5[14],xmm3[15],xmm5[15] ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm10, %xmm14 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm13, %ymm14, %ymm13 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm14 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm12, %ymm14, %ymm12 ; AVX1-ONLY-NEXT: vandps %ymm14, %ymm13, %ymm13 ; AVX1-ONLY-NEXT: vorps %ymm12, %ymm13, %ymm12 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm12, %xmm13 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm13 = 
xmm13[0,1,2,3],zero,xmm13[5,6,7,8],zero,xmm13[10,11,12,13],zero,xmm13[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm14 = zero,zero,zero,zero,xmm0[0],zero,zero,zero,zero,xmm0[1],zero,zero,zero,zero,xmm0[2],zero ; AVX1-ONLY-NEXT: vpor %xmm14, %xmm13, %xmm13 ; AVX1-ONLY-NEXT: vmovdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm12, %xmm12 ; AVX1-ONLY-NEXT: vmovdqa (%r8), %xmm13 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm15 = xmm13[12],zero,zero,zero,zero,xmm13[13],zero,zero,zero,zero,xmm13[14],zero,zero,zero,zero,xmm13[15] ; AVX1-ONLY-NEXT: vpor %xmm15, %xmm12, %xmm11 ; AVX1-ONLY-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm11 = [128,6,128,8,0,128,7,128,128,6,128,8,0,128,7,128] ; AVX1-ONLY-NEXT: # xmm11 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[6],zero,xmm2[u,u,u,7],zero,xmm2[u,u,u,8],zero,xmm2[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm1, %xmm2, %xmm1 ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm11 = [0,6,7,10,11,0,8,9,0,6,7,10,11,0,8,9] ; AVX1-ONLY-NEXT: # xmm11 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm4, %xmm2 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm1 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm8[u,u,u],zero,xmm8[7,u,u,u],zero,xmm8[8,u,u,u],zero,xmm8[9,u] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm6[u,u,u,7],zero,xmm6[u,u,u,8],zero,xmm6[u,u,u,9],zero,xmm6[u] ; AVX1-ONLY-NEXT: vpor %xmm2, %xmm4, %xmm2 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm8[0],xmm6[0],xmm8[1],xmm6[1],xmm8[2],xmm6[2],xmm8[3],xmm6[3],xmm8[4],xmm6[4],xmm8[5],xmm6[5],xmm8[6],xmm6[6],xmm8[7],xmm6[7] ; AVX1-ONLY-NEXT: vmovddup {{.*#+}} xmm6 = [6,11,10,0,9,8,13,12,6,11,10,0,9,8,13,12] ; AVX1-ONLY-NEXT: # xmm6 = mem[0,0] ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm2, %ymm4, %ymm2 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm4 = [255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0] ; AVX1-ONLY-NEXT: vandnps %ymm1, %ymm4, %ymm1 ; AVX1-ONLY-NEXT: vandps %ymm4, %ymm2, %ymm2 ; AVX1-ONLY-NEXT: vorps %ymm1, %ymm2, %ymm2 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm2, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,128,3,4,5,6,128,8,9,10,11,128,13,14,15] ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm15 = [128,128,6,128,128,128,128,7,128,128,128,128,8,128,128,128] ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm0, %xmm4 ; AVX1-ONLY-NEXT: vpor %xmm4, %xmm1, %xmm12 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,2,128,4,5,6,7,128,9,10,11,12,128,14,15] ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vmovdqa {{.*#+}} xmm8 = [128,128,128,3,128,128,128,128,4,128,128,128,128,5,128,128] ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpor %xmm0, %xmm2, %xmm0 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm2 = xmm7[0],xmm9[0],xmm7[1],xmm9[1],xmm7[2],xmm9[2],xmm7[3],xmm9[3],xmm7[4],xmm9[4],xmm7[5],xmm9[5],xmm7[6],xmm9[6],xmm7[7],xmm9[7] ; AVX1-ONLY-NEXT: vpshufb %xmm11, %xmm2, %xmm4 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm2 = xmm2[u,u,0,1,u,u,u,2,3,u,u,u,4,5,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm2, %ymm2 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3],xmm5[4],xmm3[4],xmm5[5],xmm3[5],xmm5[6],xmm3[6],xmm5[7],xmm3[7] ; AVX1-ONLY-NEXT: vpshufb %xmm6, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vpunpcklbw {{.*#+}} xmm6 = 
xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[0,1,u,u,u,2,3,u,u,u,4,5,u,u,u,6] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm4, %ymm6, %ymm4 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm6 = [255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm2, %ymm6, %ymm2 ; AVX1-ONLY-NEXT: vandps %ymm6, %ymm4, %ymm4 ; AVX1-ONLY-NEXT: vorps %ymm2, %ymm4, %ymm4 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm4, %xmm2 ; AVX1-ONLY-NEXT: vpshufb %xmm1, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpshufb %xmm8, %xmm13, %xmm6 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm4 = xmm4[0,1,2,3],zero,xmm4[5,6,7,8],zero,xmm4[10,11,12,13],zero,xmm4[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,zero,zero,zero,xmm13[0],zero,zero,zero,zero,xmm13[1],zero,zero,zero,zero,xmm13[2],zero ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[u,u,u],zero,xmm5[7,u,u,u],zero,xmm5[8,u,u,u],zero,xmm5[9,u] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm3 = xmm3[u,u,u,7],zero,xmm3[u,u,u,8],zero,xmm3[u,u,u,9],zero,xmm3[u] ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm10[u,u,4,5,u,u,u,6,7,u,u,u,8,9,u,u] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm5, %ymm3, %ymm3 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = zero,xmm9[6,u,u,u],zero,xmm9[7,u,u,u],zero,xmm9[8,u,u,u],zero ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm7[6],zero,xmm7[u,u,u,7],zero,xmm7[u,u,u,8],zero,xmm7[u,u,u,9] ; AVX1-ONLY-NEXT: vpor %xmm5, %xmm6, %xmm5 ; AVX1-ONLY-NEXT: vpunpckhbw {{.*#+}} xmm6 = xmm9[8],xmm7[8],xmm9[9],xmm7[9],xmm9[10],xmm7[10],xmm9[11],xmm7[11],xmm9[12],xmm7[12],xmm9[13],xmm7[13],xmm9[14],xmm7[14],xmm9[15],xmm7[15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = xmm6[2,u,u,u,5,4,u,u,u,7,6,u,u,u,9,8] ; AVX1-ONLY-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5 ; AVX1-ONLY-NEXT: vmovaps {{.*#+}} ymm1 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255] ; AVX1-ONLY-NEXT: vandnps %ymm3, %ymm1, %ymm3 ; AVX1-ONLY-NEXT: vandps %ymm1, %ymm5, %ymm5 ; AVX1-ONLY-NEXT: vorps %ymm3, %ymm5, %ymm3 ; AVX1-ONLY-NEXT: vextractf128 $1, %ymm3, %xmm5 ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm5 = xmm5[0],zero,xmm5[2,3,4,5],zero,xmm5[7,8,9,10],zero,xmm5[12,13,14,15] ; AVX1-ONLY-NEXT: vpshufb {{.*#+}} xmm6 = zero,xmm13[9],zero,zero,zero,zero,xmm13[10],zero,zero,zero,zero,xmm13[11],zero,zero,zero,zero ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm5, %xmm5 ; AVX1-ONLY-NEXT: vpshufb %xmm14, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpshufb %xmm15, %xmm13, %xmm6 ; AVX1-ONLY-NEXT: vpor %xmm6, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 32(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm5, 48(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm4, (%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 16(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 96(%r9) ; AVX1-ONLY-NEXT: vmovdqa %xmm12, 112(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 64(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 80(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 128(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 144(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: 
vmovaps %xmm0, 160(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 176(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 224(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 240(%r9) ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 192(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 208(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 288(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 304(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 256(%r9) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm0, 272(%r9) ; AVX1-ONLY-NEXT: addq $104, %rsp ; AVX1-ONLY-NEXT: vzeroupper ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: store_i8_stride5_vf64: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: subq $312, %rsp # imm = 0x138 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm10 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %xmm1 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %xmm11 ; AVX2-SLOW-NEXT: vmovdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm0 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm1, %xmm1 ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %xmm3 ; AVX2-SLOW-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %xmm12 ; AVX2-SLOW-NEXT: vmovdqa %xmm12, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX2-SLOW-NEXT: vpshufb %xmm2, %xmm3, %xmm3 ; AVX2-SLOW-NEXT: vpor %xmm1, %xmm3, %xmm1 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %xmm4 ; AVX2-SLOW-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm3 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX2-SLOW-NEXT: vpshufb %xmm3, %xmm4, %xmm4 ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %xmm14 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm5 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX2-SLOW-NEXT: vpshufb %xmm5, %xmm14, %xmm8 ; AVX2-SLOW-NEXT: vpor %xmm4, %xmm8, %xmm4 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm8 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm1, %ymm4, %ymm1 ; AVX2-SLOW-NEXT: vmovdqa (%r8), %xmm4 ; AVX2-SLOW-NEXT: vmovdqa %xmm4, (%rsp) # 16-byte Spill ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm4 = xmm4[1,1,2,2] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm9, %ymm1, %ymm4, %ymm1 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %xmm4 ; AVX2-SLOW-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vpshufb %xmm0, %xmm11, %xmm0 ; AVX2-SLOW-NEXT: vpshufb %xmm2, 
%xmm12, %xmm1 ; AVX2-SLOW-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %xmm2 ; AVX2-SLOW-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vpshufb %xmm3, %xmm4, %xmm1 ; AVX2-SLOW-NEXT: vpshufb %xmm5, %xmm2, %xmm2 ; AVX2-SLOW-NEXT: vpor %xmm1, %xmm2, %xmm1 ; AVX2-SLOW-NEXT: vmovdqa 32(%rsi), %ymm2 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %xmm1 ; AVX2-SLOW-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm9, %ymm0, %ymm1, %ymm0 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm15 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23] ; AVX2-SLOW-NEXT: vpshufb %ymm15, %ymm10, %ymm1 ; AVX2-SLOW-NEXT: vmovdqa %ymm10, %ymm11 ; AVX2-SLOW-NEXT: vmovdqu %ymm10, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128] ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1,0,1] ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm2, %ymm3 ; AVX2-SLOW-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX2-SLOW-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX2-SLOW-NEXT: vmovdqa 32(%rcx), %ymm7 ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm3 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128] ; AVX2-SLOW-NEXT: # ymm3 = mem[0,1,0,1] ; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm7, %ymm4 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm9 = ymm4[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa 32(%rdx), %ymm13 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25] ; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm13, %ymm12 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3] ; AVX2-SLOW-NEXT: vpor %ymm9, %ymm12, %ymm9 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0] ; AVX2-SLOW-NEXT: vpblendvb %ymm12, %ymm1, %ymm9, %ymm9 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm6 ; AVX2-SLOW-NEXT: vpshufb %ymm15, %ymm6, %ymm1 ; AVX2-SLOW-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa (%rsi), %ymm15 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm15, %ymm5 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3] ; AVX2-SLOW-NEXT: vpor %ymm1, %ymm5, %ymm5 ; AVX2-SLOW-NEXT: vmovdqa (%rcx), %ymm10 ; AVX2-SLOW-NEXT: vpshufb %ymm3, %ymm10, %ymm3 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm3[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa (%rdx), %ymm8 ; AVX2-SLOW-NEXT: vpshufb %ymm4, %ymm8, %ymm4 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3] ; AVX2-SLOW-NEXT: vpor %ymm0, %ymm4, %ymm0 ; AVX2-SLOW-NEXT: vpblendvb %ymm12, %ymm5, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa 32(%r8), %ymm12 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm12[0,2,1,1,4,6,5,5] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,3,2] ; AVX2-SLOW-NEXT: vmovdqa 
{{.*#+}} ymm5 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm9, %ymm4, %ymm1 ; AVX2-SLOW-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vmovdqa (%r8), %ymm9 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm9[0,2,1,1,4,6,5,5] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,3,3,2] ; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm0 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [3,3,3,u,4,4,4,4] ; AVX2-SLOW-NEXT: vpermd %ymm11, %ymm3, %ymm4 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm5 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14] ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm1, %ymm4, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpermd %ymm6, %ymm3, %ymm2 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm15, %ymm4 ; AVX2-SLOW-NEXT: vpblendvb %ymm1, %ymm2, %ymm4, %ymm1 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX2-SLOW-NEXT: vmovdqa %ymm7, %ymm3 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm7, %ymm4 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128] ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm13, %ymm11 ; AVX2-SLOW-NEXT: vpor %ymm4, %ymm11, %ymm4 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm0, %ymm4, %ymm0 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm10, %ymm2 ; AVX2-SLOW-NEXT: vpshufb %ymm5, %ymm8, %ymm4 ; AVX2-SLOW-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm1, %ymm2, %ymm1 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [3,3,3,3,u,4,4,4] ; AVX2-SLOW-NEXT: vpermd %ymm12, %ymm2, %ymm4 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm5 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm0, %ymm4, %ymm0 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vpermd %ymm9, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm1, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX2-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7] ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX2-SLOW-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload ; AVX2-SLOW-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm14 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX2-SLOW-NEXT: vpshufb %xmm14, %xmm0, %xmm0 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm1, %xmm1 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; 
AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm6 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX2-SLOW-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm0, %xmm0 # 16-byte Folded Reload ; AVX2-SLOW-NEXT: # xmm0 = xmm0[0],mem[0],xmm0[1],mem[1],xmm0[2],mem[2],xmm0[3],mem[3],xmm0[4],mem[4],xmm0[5],mem[5],xmm0[6],mem[6],xmm0[7],mem[7] ; AVX2-SLOW-NEXT: vpshufb %xmm14, %xmm0, %xmm0 ; AVX2-SLOW-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX2-SLOW-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm1, %xmm1 # 16-byte Folded Reload ; AVX2-SLOW-NEXT: # xmm1 = xmm1[0],mem[0],xmm1[1],mem[1],xmm1[2],mem[2],xmm1[3],mem[3],xmm1[4],mem[4],xmm1[5],mem[5],xmm1[6],mem[6],xmm1[7],mem[7] ; AVX2-SLOW-NEXT: vpshufb %xmm7, %xmm1, %xmm1 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm11, %ymm0, %ymm1, %ymm0 ; AVX2-SLOW-NEXT: vpshufd $80, (%rsp), %xmm1 # 16-byte Folded Reload ; AVX2-SLOW-NEXT: # xmm1 = mem[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm2 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm6, %ymm1, %ymm6 ; AVX2-SLOW-NEXT: vpshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload ; AVX2-SLOW-NEXT: # xmm1 = mem[0,0,1,1] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,0,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm7 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12] ; AVX2-SLOW-NEXT: vpshufb %ymm0, %ymm3, %ymm1 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm13[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14] ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm4 = [255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0,255,0,255,0,0,255,0,255,0,255,0,0,255,0,255,0] ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,0,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1 ; AVX2-SLOW-NEXT: vpshufb %ymm0, %ymm10, %ymm0 ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm8[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14] ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm2[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm2 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14] ; AVX2-SLOW-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm3 # 32-byte Reload ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm3, %ymm3 ; AVX2-SLOW-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15] ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm5 = [255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255,255,0,0,255,0,255,0,0,0,0,255,0,255,0,0,255] ; AVX2-SLOW-NEXT: # ymm5 = mem[0,1,0,1] ; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3 ; AVX2-SLOW-NEXT: vpshufb %ymm2, %ymm15, %ymm2 ; AVX2-SLOW-NEXT: vpshufhw $230, {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Folded Reload ; AVX2-SLOW-NEXT: # ymm4 = mem[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15] ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, 
%ymm2 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u] ; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm1, %ymm3, %ymm1 ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX2-SLOW-NEXT: vpblendvb %ymm4, %ymm0, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm12[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm1, %ymm2, %ymm1 ; AVX2-SLOW-NEXT: vpshufd {{.*#+}} ymm2 = ymm9[2,2,3,3,6,6,7,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX2-SLOW-NEXT: vpblendvb %ymm3, %ymm0, %ymm2, %ymm0 ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload ; AVX2-SLOW-NEXT: vmovaps %ymm2, 64(%r9) ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload ; AVX2-SLOW-NEXT: vmovaps %ymm2, 224(%r9) ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload ; AVX2-SLOW-NEXT: vmovaps %ymm2, 96(%r9) ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm2 # 32-byte Reload ; AVX2-SLOW-NEXT: vmovaps %ymm2, 256(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 128(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm7, 160(%r9) ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-SLOW-NEXT: vmovaps %ymm0, 192(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm1, 288(%r9) ; AVX2-SLOW-NEXT: vmovdqa %ymm6, (%r9) ; AVX2-SLOW-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-SLOW-NEXT: vmovaps %ymm0, 32(%r9) ; AVX2-SLOW-NEXT: addq $312, %rsp # imm = 0x138 ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: store_i8_stride5_vf64: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: subq $168, %rsp ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %ymm13 ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %ymm10 ; AVX2-FAST-NEXT: vmovdqa (%r8), %ymm11 ; AVX2-FAST-NEXT: vmovdqa (%rcx), %xmm1 ; AVX2-FAST-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vmovdqa 32(%rcx), %xmm6 ; AVX2-FAST-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm0 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm1, %xmm1 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %xmm3 ; AVX2-FAST-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vmovdqa 32(%rdx), %xmm8 ; AVX2-FAST-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm3, %xmm3 ; AVX2-FAST-NEXT: vpor %xmm1, %xmm3, %xmm1 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa (%rdi), %xmm4 ; AVX2-FAST-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm4, %xmm4 ; AVX2-FAST-NEXT: vmovdqa (%rsi), %xmm7 ; AVX2-FAST-NEXT: vmovdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm7, %xmm7 ; AVX2-FAST-NEXT: vpor %xmm4, %xmm7, %xmm4 ; 
AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm1 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %xmm4 ; AVX2-FAST-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vpshufb %xmm0, %xmm6, %xmm0 ; AVX2-FAST-NEXT: vpshufb %xmm2, %xmm8, %xmm2 ; AVX2-FAST-NEXT: vpor %xmm0, %xmm2, %xmm0 ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %xmm6 ; AVX2-FAST-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-NEXT: vpshufb %xmm3, %xmm4, %xmm2 ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm6, %xmm3 ; AVX2-FAST-NEXT: vpor %xmm2, %xmm3, %xmm2 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [1,1,2,2,2,2,2,2] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1] ; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm0, %ymm2, %ymm0 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm3, %ymm2 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1 ; AVX2-FAST-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-NEXT: vmovdqa 32(%r8), %ymm12 ; AVX2-FAST-NEXT: vpermd %ymm12, %ymm3, %ymm1 ; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm0, %ymm1, %ymm0 ; AVX2-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,29,26,128,28,128,30,128,28,29,128,31,128,29] ; AVX2-FAST-NEXT: vpshufb %ymm0, %ymm13, %ymm1 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128] ; AVX2-FAST-NEXT: # ymm8 = mem[0,1,0,1] ; AVX2-FAST-NEXT: vpshufb %ymm8, %ymm10, %ymm2 ; AVX2-FAST-NEXT: vmovdqa %ymm10, %ymm14 ; AVX2-FAST-NEXT: vmovdqu %ymm10, (%rsp) # 32-byte Spill ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm1, %ymm2, %ymm1 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,29,26,128,28,128,26,27,28,29,128,31,128,29,30,128] ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm3, %ymm2 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm2[2,2,3,3] ; AVX2-FAST-NEXT: vmovdqa 32(%rsi), %ymm2 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30] ; AVX2-FAST-NEXT: # ymm15 = mem[0,1,0,1] ; AVX2-FAST-NEXT: vpshufb %ymm15, %ymm2, %ymm5 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm5 = ymm5[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm4, %ymm5, %ymm4 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u] ; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm4 ; AVX2-FAST-NEXT: vmovdqa (%rdx), %ymm10 ; AVX2-FAST-NEXT: vpshufb %ymm0, %ymm10, %ymm0 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX2-FAST-NEXT: vmovdqa (%rcx), %ymm5 ; AVX2-FAST-NEXT: vpshufb %ymm8, %ymm5, %ymm1 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm0, %ymm1, %ymm8 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm1 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm1, %ymm0 ; 
AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm0[2,2,3,3] ; AVX2-FAST-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-FAST-NEXT: vpshufb %ymm15, %ymm0, %ymm15 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm9, %ymm15, %ymm9 ; AVX2-FAST-NEXT: vpblendvb %ymm7, %ymm8, %ymm9, %ymm7 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [6,6,6,6,7,7,7,7] ; AVX2-FAST-NEXT: vpermd %ymm12, %ymm8, %ymm9 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm15 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX2-FAST-NEXT: vpblendvb %ymm15, %ymm4, %ymm9, %ymm4 ; AVX2-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm8, %ymm4 ; AVX2-FAST-NEXT: vpblendvb %ymm15, %ymm7, %ymm4, %ymm4 ; AVX2-FAST-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23] ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm3, %ymm7 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128] ; AVX2-FAST-NEXT: # ymm8 = mem[0,1,0,1] ; AVX2-FAST-NEXT: vpshufb %ymm8, %ymm2, %ymm9 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm7, %ymm9, %ymm7 ; AVX2-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128] ; AVX2-FAST-NEXT: # ymm9 = mem[0,1,0,1] ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm14, %ymm15 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25] ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm13, %ymm14 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm15, %ymm14, %ymm14 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm15 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0] ; AVX2-FAST-NEXT: vpblendvb %ymm15, %ymm7, %ymm14, %ymm7 ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm1, %ymm6 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX2-FAST-NEXT: vpshufb %ymm8, %ymm0, %ymm8 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm6, %ymm8, %ymm6 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm5, %ymm8 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm10, %ymm4 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3] ; AVX2-FAST-NEXT: vpor %ymm4, %ymm8, %ymm4 ; AVX2-FAST-NEXT: vpblendvb %ymm15, %ymm6, %ymm4, %ymm4 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [4,6,5,5,5,5,4,6] ; AVX2-FAST-NEXT: vpermd %ymm12, %ymm6, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm14 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm14, %ymm7, %ymm8, %ymm9 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm6, %ymm6 ; AVX2-FAST-NEXT: vpblendvb %ymm14, %ymm4, %ymm6, %ymm7 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [3,3,3,u,4,4,4,4] ; AVX2-FAST-NEXT: vpermd %ymm3, %ymm4, %ymm3 ; AVX2-FAST-NEXT: vpbroadcastq {{.*#+}} ymm6 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14] ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm2, %ymm2 ; 
AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255] ; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm3, %ymm2, %ymm2 ; AVX2-FAST-NEXT: vpermd %ymm1, %ymm4, %ymm1 ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpblendvb %ymm8, %ymm1, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX2-FAST-NEXT: vmovdqu (%rsp), %ymm3 # 32-byte Reload ; AVX2-FAST-NEXT: vpshufb %ymm1, %ymm3, %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128] ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm13, %ymm6 ; AVX2-FAST-NEXT: vpor %ymm3, %ymm6, %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255] ; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm2, %ymm3, %ymm2 ; AVX2-FAST-NEXT: vpshufb %ymm1, %ymm5, %ymm1 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm10, %ymm3 ; AVX2-FAST-NEXT: vpor %ymm1, %ymm3, %ymm1 ; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm1 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [3,3,3,3,u,4,4,4] ; AVX2-FAST-NEXT: vpermd %ymm12, %ymm3, %ymm0 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm2, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm3, %ymm2 ; AVX2-FAST-NEXT: vpblendvb %ymm4, %ymm1, %ymm2, %ymm1 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload ; AVX2-FAST-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload ; AVX2-FAST-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7] ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload ; AVX2-FAST-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload ; AVX2-FAST-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3],xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm2, %xmm2 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm3, %xmm3 ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm2, %ymm3, %ymm2 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload ; AVX2-FAST-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload ; AVX2-FAST-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3],xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7] ; AVX2-FAST-NEXT: vpshufb %xmm4, %xmm3, %xmm3 ; AVX2-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; AVX2-FAST-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload ; AVX2-FAST-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1],xmm4[2],mem[2],xmm4[3],mem[3],xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7] ; AVX2-FAST-NEXT: vpshufb %xmm5, %xmm4, %xmm4 ; 
AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,0,0,0,0,0,1,1] ; AVX2-FAST-NEXT: vpermd %ymm11, %ymm4, %ymm5 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm2, %ymm5, %ymm2 ; AVX2-FAST-NEXT: vpermd %ymm12, %ymm4, %ymm4 ; AVX2-FAST-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm3 ; AVX2-FAST-NEXT: vmovdqa %ymm1, 64(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm0, 224(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm7, 96(%r9) ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-NEXT: vmovaps %ymm0, 128(%r9) ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-NEXT: vmovaps %ymm0, 288(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm9, 256(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm3, 160(%r9) ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-NEXT: vmovaps %ymm0, 192(%r9) ; AVX2-FAST-NEXT: vmovdqa %ymm2, (%r9) ; AVX2-FAST-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-NEXT: vmovaps %ymm0, 32(%r9) ; AVX2-FAST-NEXT: addq $168, %rsp ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: store_i8_stride5_vf64: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: subq $200, %rsp ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %ymm11 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %xmm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, (%rsp) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %xmm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm0 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm1, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %xmm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdx), %xmm9 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm2 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm3, %xmm3 ; AVX2-FAST-PERLANE-NEXT: vpor %xmm1, %xmm3, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %xmm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm3 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm4, %xmm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %xmm6 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm5 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm5, %xmm6, %xmm6 ; AVX2-FAST-PERLANE-NEXT: vpor %xmm4, %xmm6, %xmm4 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm6, %ymm1, %ymm4, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %xmm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm4 
= xmm4[1,1,2,2] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,1,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm1, %ymm4, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm1, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %xmm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm0, %xmm8, %xmm0 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm2, %xmm9, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %xmm2 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm3, %xmm4, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm5, %xmm2, %xmm2 ; AVX2-FAST-PERLANE-NEXT: vpor %xmm1, %xmm2, %xmm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rcx), %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm6, %ymm0, %ymm1, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %xmm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[1,1,2,2] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,1,1,1] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm7, %ymm0, %ymm1, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,29,26,128,28,128,30,128,28,29,128,31,128,29] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm0, %ymm11, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vbroadcasti128 {{.*#+}} ymm8 = [25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128] ; AVX2-FAST-PERLANE-NEXT: # ymm8 = mem[0,1,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm8, %ymm3, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, %ymm12 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm1, %ymm2, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm5 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,29,26,128,28,128,26,27,28,29,128,31,128,29,30,128] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm4, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm2[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rsi), %ymm2 ; AVX2-FAST-PERLANE-NEXT: vbroadcasti128 {{.*#+}} ymm9 = [27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30] ; AVX2-FAST-PERLANE-NEXT: # ymm9 = mem[0,1,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm2, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm6, %ymm7, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm10 = [255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u,0,0,255,255,u] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm1, %ymm6, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdx), %ymm13 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm0, %ymm13, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} 
ymm0 = ymm0[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rcx), %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm8, %ymm7, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm1 = ymm1[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm0, %ymm1, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm5, %ymm1, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm5 = ymm0[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsi), %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm0, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm5, %ymm9, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm8, %ymm5, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%r8), %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm5[2,2,3,3,6,6,7,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm10 = [255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm3, %ymm9, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm3, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%r8), %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm9 = ymm3[2,2,3,3,6,6,7,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm10, %ymm8, %ymm9, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vmovdqu %ymm6, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,19,20,128,22,128,24,128,22,23,128,25,128,23] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm8, %ymm4, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm9 = ymm9[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vbroadcasti128 {{.*#+}} ymm10 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128] ; AVX2-FAST-PERLANE-NEXT: # ymm10 = mem[0,1,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm10, %ymm2, %ymm15 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm9, %ymm15, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vbroadcasti128 {{.*#+}} ymm15 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128] ; AVX2-FAST-PERLANE-NEXT: # ymm15 = mem[0,1,0,1] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm15, %ymm12, %ymm14 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm11, %ymm12 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm14, %ymm12, %ymm12 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm14 = [255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm14, %ymm9, %ymm12, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm8, %ymm1, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm10, %ymm0, %ymm10 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm8, %ymm10, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm15, %ymm7, %ymm10 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm10 = ymm10[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm13, 
%ymm6 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX2-FAST-PERLANE-NEXT: vpor %ymm6, %ymm10, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm14, %ymm8, %ymm6, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm8 = ymm5[0,2,1,1,4,6,5,5] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm12 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm12, %ymm9, %ymm8, %ymm10 ; AVX2-FAST-PERLANE-NEXT: vpshufd {{.*#+}} ymm8 = ymm3[0,2,1,1,4,6,5,5] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,3,3,2] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm12, %ymm6, %ymm8, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = [3,3,3,u,4,4,4,4] ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm4, %ymm6, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vpbroadcastq {{.*#+}} ymm8 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm8, %ymm2, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm12 = [u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255,0,u,u,u,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm12, %ymm4, %ymm2, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm1, %ymm6, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm8, %ymm0, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm12, %ymm1, %ymm0, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX2-FAST-PERLANE-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm4 # 32-byte Reload ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm1, %ymm4, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = [128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm11, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vpor %ymm4, %ymm8, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm8 = [u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm8, %ymm2, %ymm4, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm1, %ymm7, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm13, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vpor %ymm1, %ymm4, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm8, %ymm0, %ymm1, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = [3,3,3,3,u,4,4,4] ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm5, %ymm4, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm5 = [0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm5, %ymm2, %ymm0, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpermd %ymm3, %ymm4, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm5, %ymm1, %ymm2, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload ; AVX2-FAST-PERLANE-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm2, %xmm2 # 16-byte Folded Reload ; AVX2-FAST-PERLANE-NEXT: # xmm2 = xmm2[0],mem[0],xmm2[1],mem[1],xmm2[2],mem[2],xmm2[3],mem[3],xmm2[4],mem[4],xmm2[5],mem[5],xmm2[6],mem[6],xmm2[7],mem[7] ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rsp), %xmm3 # 16-byte Reload ; AVX2-FAST-PERLANE-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload ; AVX2-FAST-PERLANE-NEXT: # xmm3 = 
xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3],xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm4 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm2, %xmm2 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} xmm5 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm5, %xmm3, %xmm3 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255,0,0,u,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm6, %ymm2, %ymm3, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload ; AVX2-FAST-PERLANE-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm3, %xmm3 # 16-byte Folded Reload ; AVX2-FAST-PERLANE-NEXT: # xmm3 = xmm3[0],mem[0],xmm3[1],mem[1],xmm3[2],mem[2],xmm3[3],mem[3],xmm3[4],mem[4],xmm3[5],mem[5],xmm3[6],mem[6],xmm3[7],mem[7] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm4, %xmm3, %xmm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; AVX2-FAST-PERLANE-NEXT: vpunpcklbw {{[-0-9]+}}(%r{{[sb]}}p), %xmm4, %xmm4 # 16-byte Folded Reload ; AVX2-FAST-PERLANE-NEXT: # xmm4 = xmm4[0],mem[0],xmm4[1],mem[1],xmm4[2],mem[2],xmm4[3],mem[3],xmm4[4],mem[4],xmm4[5],mem[5],xmm4[6],mem[6],xmm4[7],mem[7] ; AVX2-FAST-PERLANE-NEXT: vpshufb %xmm5, %xmm4, %xmm4 ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm6, %ymm3, %ymm4, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload ; AVX2-FAST-PERLANE-NEXT: # xmm4 = mem[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm5 = [255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm5, %ymm2, %ymm4, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpshufd $80, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload ; AVX2-FAST-PERLANE-NEXT: # xmm4 = mem[0,0,1,1] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,0,1] ; AVX2-FAST-PERLANE-NEXT: vpblendvb %ymm5, %ymm3, %ymm4, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm1, 64(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 224(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm9, 96(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 128(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 288(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm10, 256(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 160(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 192(%r9) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, (%r9) ; AVX2-FAST-PERLANE-NEXT: vmovups {{[-0-9]+}}(%r{{[sb]}}p), %ymm0 # 32-byte Reload ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm0, 32(%r9) ; AVX2-FAST-PERLANE-NEXT: addq $200, %rsp ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512F-SLOW-LABEL: store_i8_stride5_vf64: ; AVX512F-SLOW: # %bb.0: ; AVX512F-SLOW-NEXT: vmovdqa 32(%rsi), %ymm3 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm15 = 
[128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128] ; AVX512F-SLOW-NEXT: vpshufb %ymm15, %ymm3, %ymm0 ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %ymm2 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm9 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19] ; AVX512F-SLOW-NEXT: vpshufb %ymm9, %ymm2, %ymm1 ; AVX512F-SLOW-NEXT: vpor %ymm0, %ymm1, %ymm0 ; AVX512F-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdi), %xmm1 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX512F-SLOW-NEXT: vpshufb %xmm5, %xmm1, %xmm0 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm1, %xmm16 ; AVX512F-SLOW-NEXT: vmovdqa 32(%rsi), %xmm4 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX512F-SLOW-NEXT: vpshufb %xmm14, %xmm4, %xmm1 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm4, %xmm31 ; AVX512F-SLOW-NEXT: vpor %xmm0, %xmm1, %xmm0 ; AVX512F-SLOW-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-SLOW-NEXT: vmovdqa 32(%rcx), %ymm8 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX512F-SLOW-NEXT: vpshufb %ymm0, %ymm8, %ymm4 ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdx), %ymm11 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u] ; AVX512F-SLOW-NEXT: vpshufb %ymm1, %ymm11, %ymm10 ; AVX512F-SLOW-NEXT: vpor %ymm4, %ymm10, %ymm4 ; AVX512F-SLOW-NEXT: vmovdqu %ymm4, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-SLOW-NEXT: vmovdqa 32(%rcx), %xmm13 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm6 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX512F-SLOW-NEXT: vpshufb %xmm6, %xmm13, %xmm4 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm6, %xmm25 ; AVX512F-SLOW-NEXT: vmovdqa 32(%rdx), %xmm10 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX512F-SLOW-NEXT: vpshufb %xmm6, %xmm10, %xmm12 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm6, %xmm26 ; AVX512F-SLOW-NEXT: vporq %xmm4, %xmm12, %xmm20 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm4 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u] ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm22 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm2[26],zero,ymm2[28],zero,zero,ymm2[27],zero,ymm2[29],zero,ymm2[31],zero,zero,ymm2[30],zero ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm2[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm2[21],zero,zero,ymm2[20],zero,ymm2[22],zero,ymm2[24],zero,zero,ymm2[23],zero,ymm2[25],zero,zero ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm2, %zmm23 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm2 = ymm11[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm11[27],zero,zero,ymm11[26],zero,ymm11[28],zero,ymm11[30],zero,zero,ymm11[29],zero,ymm11[31],zero,zero ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm8[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm8[19],zero,ymm8[21],zero,zero,ymm8[20],zero,ymm8[22],zero,ymm8[24],zero,zero,ymm8[23],zero ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm24 ; AVX512F-SLOW-NEXT: vmovdqa (%rcx), %ymm12 ; AVX512F-SLOW-NEXT: vpshufb %ymm0, %ymm12, %ymm0 ; 
AVX512F-SLOW-NEXT: vmovdqa (%rdx), %ymm6 ; AVX512F-SLOW-NEXT: vpshufb %ymm1, %ymm6, %ymm1 ; AVX512F-SLOW-NEXT: vporq %ymm0, %ymm1, %ymm19 ; AVX512F-SLOW-NEXT: vmovdqa (%rsi), %ymm7 ; AVX512F-SLOW-NEXT: vpshufb %ymm15, %ymm7, %ymm2 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %ymm4 ; AVX512F-SLOW-NEXT: vpshufb %ymm9, %ymm4, %ymm3 ; AVX512F-SLOW-NEXT: vporq %ymm2, %ymm3, %ymm21 ; AVX512F-SLOW-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512F-SLOW-NEXT: vpshufb %xmm5, %xmm0, %xmm3 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm0, %xmm17 ; AVX512F-SLOW-NEXT: vmovdqa (%rsi), %xmm5 ; AVX512F-SLOW-NEXT: vpshufb %xmm14, %xmm5, %xmm9 ; AVX512F-SLOW-NEXT: vporq %xmm3, %xmm9, %xmm27 ; AVX512F-SLOW-NEXT: vmovdqa (%rcx), %xmm1 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm25, %xmm0 ; AVX512F-SLOW-NEXT: vpshufb %xmm0, %xmm1, %xmm0 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm1, %xmm18 ; AVX512F-SLOW-NEXT: vmovdqa (%rdx), %xmm9 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm26, %xmm1 ; AVX512F-SLOW-NEXT: vpshufb %xmm1, %xmm9, %xmm15 ; AVX512F-SLOW-NEXT: vporq %xmm0, %xmm15, %xmm29 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} xmm0 = mem[1,1,2,2] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,1,1,1] ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm28 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX512F-SLOW-NEXT: vpandnq %ymm0, %ymm28, %ymm0 ; AVX512F-SLOW-NEXT: vmovdqa 32(%r8), %ymm15 ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} ymm1 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128] ; AVX512F-SLOW-NEXT: vpshufb %ymm1, %ymm15, %ymm14 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm14, %zmm0, %zmm26 ; AVX512F-SLOW-NEXT: vmovdqa (%r8), %ymm0 ; AVX512F-SLOW-NEXT: vpshufb %ymm1, %ymm0, %ymm1 ; AVX512F-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2] ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX512F-SLOW-NEXT: vpandnq %ymm0, %ymm30, %ymm0 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm25 ; AVX512F-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12] ; AVX512F-SLOW-NEXT: vpshufb %ymm0, %ymm8, %ymm8 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm14 = ymm12[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm12[19],zero,ymm12[21],zero,zero,ymm12[20],zero,ymm12[22],zero,ymm12[24],zero,zero,ymm12[23],zero ; AVX512F-SLOW-NEXT: vpshufb %ymm0, %ymm12, %ymm12 ; AVX512F-SLOW-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25] ; AVX512F-SLOW-NEXT: # ymm0 = mem[0,1,0,1] ; AVX512F-SLOW-NEXT: vpshufb %ymm0, %ymm11, %ymm11 ; AVX512F-SLOW-NEXT: vpshufb %ymm0, %ymm6, %ymm2 ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[27],zero,zero,ymm6[26],zero,ymm6[28],zero,ymm6[30],zero,zero,ymm6[29],zero,ymm6[31],zero,zero ; AVX512F-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3],xmm13[4],xmm10[4],xmm13[5],xmm10[5],xmm13[6],xmm10[6],xmm13[7],xmm10[7] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} 
ymm13 = ymm7[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm7 = ymm7[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero,ymm4[25],zero,zero ; AVX512F-SLOW-NEXT: vpshufb {{.*#+}} ymm3 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[26],zero,ymm4[28],zero,zero,ymm4[27],zero,ymm4[29],zero,ymm4[31],zero,zero,ymm4[30],zero ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm16, %xmm1 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm31, %xmm4 ; AVX512F-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm1[0],xmm4[0],xmm1[1],xmm4[1],xmm1[2],xmm4[2],xmm1[3],xmm4[3],xmm1[4],xmm4[4],xmm1[5],xmm4[5],xmm1[6],xmm4[6],xmm1[7],xmm4[7] ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [4,u,5,5,5,5,u,6,6,6,6,u,7,7,7,7] ; AVX512F-SLOW-NEXT: vpermd %zmm15, %zmm1, %zmm31 ; AVX512F-SLOW-NEXT: vmovdqa64 (%r8), %zmm16 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm1 = [6,6,6,u,7,7,7,7,u,16,16,16,16,u,17,17] ; AVX512F-SLOW-NEXT: vpermi2d %zmm15, %zmm16, %zmm1 ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm17, %xmm15 ; AVX512F-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm15[0],xmm5[0],xmm15[1],xmm5[1],xmm15[2],xmm5[2],xmm15[3],xmm5[3],xmm15[4],xmm5[4],xmm15[5],xmm5[5],xmm15[6],xmm5[6],xmm15[7],xmm5[7] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512F-SLOW-NEXT: vpshufb %xmm15, %xmm4, %xmm4 ; AVX512F-SLOW-NEXT: vpshufb %xmm15, %xmm5, %xmm5 ; AVX512F-SLOW-NEXT: vinserti32x4 $2, %xmm27, %zmm5, %zmm5 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm13[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX512F-SLOW-NEXT: vmovdqa64 %xmm18, %xmm15 ; AVX512F-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm9 = xmm15[0],xmm9[0],xmm15[1],xmm9[1],xmm15[2],xmm9[2],xmm15[3],xmm9[3],xmm15[4],xmm9[4],xmm15[5],xmm9[5],xmm15[6],xmm9[6],xmm15[7],xmm9[7] ; AVX512F-SLOW-NEXT: vmovdqa {{.*#+}} xmm15 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512F-SLOW-NEXT: vpshufb %xmm15, %xmm10, %xmm10 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX512F-SLOW-NEXT: vpshufb %xmm15, %xmm9, %xmm9 ; AVX512F-SLOW-NEXT: vinserti32x4 $2, %xmm29, %zmm9, %zmm9 ; AVX512F-SLOW-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm15 # 32-byte Folded Reload ; AVX512F-SLOW-NEXT: # ymm15 = mem[0,0,1,1] ; AVX512F-SLOW-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm15, %zmm15 # 32-byte Folded Reload ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} ymm17 = ymm20[0,0,1,1] ; AVX512F-SLOW-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm17, %zmm17 # 32-byte Folded Reload ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm18 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0] ; AVX512F-SLOW-NEXT: vpternlogq $226, %zmm15, %zmm18, %zmm17 ; 
AVX512F-SLOW-NEXT: vpternlogq $248, %zmm28, %zmm17, %zmm26 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} zmm15 = zmm22[2,2,3,3,6,6,7,7] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} zmm17 = zmm23[2,2,3,3,6,6,7,7] ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm20 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975] ; AVX512F-SLOW-NEXT: vpternlogq $248, %zmm20, %zmm15, %zmm17 ; AVX512F-SLOW-NEXT: vpandq %ymm20, %ymm8, %ymm8 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm8, %zmm11, %zmm8 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} zmm11 = zmm24[2,2,3,3,6,6,7,7] ; AVX512F-SLOW-NEXT: vporq %zmm11, %zmm8, %zmm8 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm11 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX512F-SLOW-NEXT: vpternlogq $226, %zmm17, %zmm11, %zmm8 ; AVX512F-SLOW-NEXT: vpternlogd $184, %zmm8, %zmm30, %zmm31 ; AVX512F-SLOW-NEXT: vpor %ymm2, %ymm14, %ymm2 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm2, %zmm19, %zmm2 ; AVX512F-SLOW-NEXT: vpternlogq $248, %ymm20, %ymm13, %ymm0 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm21, %zmm0 ; AVX512F-SLOW-NEXT: vpternlogq $226, %zmm2, %zmm18, %zmm0 ; AVX512F-SLOW-NEXT: vpternlogq $248, %ymm20, %ymm12, %ymm6 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm10, %zmm6, %zmm2 ; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm7, %ymm3 ; AVX512F-SLOW-NEXT: vinserti64x4 $1, %ymm4, %zmm3, %zmm3 ; AVX512F-SLOW-NEXT: vpternlogq $226, %zmm2, %zmm11, %zmm3 ; AVX512F-SLOW-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm25 ; AVX512F-SLOW-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm3, %zmm1 ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} zmm0 = zmm5[0,0,1,1,4,4,5,5] ; AVX512F-SLOW-NEXT: vpermq {{.*#+}} zmm2 = zmm9[0,0,1,1,4,4,5,5] ; AVX512F-SLOW-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2 ; AVX512F-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,0,0,0,0,u,1,1,1,1,u,2,2,2,2,u] ; AVX512F-SLOW-NEXT: vpermd %zmm16, %zmm0, %zmm0 ; AVX512F-SLOW-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm25, 64(%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm0, (%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm1, 128(%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm31, 256(%r9) ; AVX512F-SLOW-NEXT: vmovdqa64 %zmm26, 192(%r9) ; AVX512F-SLOW-NEXT: vzeroupper ; AVX512F-SLOW-NEXT: retq ; ; AVX512F-FAST-LABEL: store_i8_stride5_vf64: ; AVX512F-FAST: # %bb.0: ; AVX512F-FAST-NEXT: subq $24, %rsp ; AVX512F-FAST-NEXT: vmovdqa 32(%rsi), %ymm3 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm8 = [128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128,128,128] ; AVX512F-FAST-NEXT: vpshufb %ymm8, %ymm3, %ymm0 ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [12,13,128,15,12,13,14,128,12,13,14,15,128,u,u,u,16,128,18,19,16,17,128,19,16,17,18,128,16,17,18,19] ; AVX512F-FAST-NEXT: vpshufb %ymm11, %ymm1, %ymm2 ; AVX512F-FAST-NEXT: vpor %ymm0, %ymm2, %ymm0 ; AVX512F-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-FAST-NEXT: vmovdqa 32(%rdi), %xmm0 ; AVX512F-FAST-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm5 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX512F-FAST-NEXT: vpshufb %xmm5, %xmm0, 
%xmm0 ; AVX512F-FAST-NEXT: vmovdqa 32(%rsi), %xmm4 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm13 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX512F-FAST-NEXT: vpshufb %xmm13, %xmm4, %xmm2 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm4, %xmm19 ; AVX512F-FAST-NEXT: vpor %xmm0, %xmm2, %xmm0 ; AVX512F-FAST-NEXT: vmovdqu %ymm0, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-FAST-NEXT: vmovdqa 32(%rcx), %ymm7 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm7, %ymm2 ; AVX512F-FAST-NEXT: vmovdqa 32(%rdx), %ymm9 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm15 = [u,u,12,13,128,u,u,u,14,128,u,u,14,15,128,u,u,u,16,128,u,u,16,17,128,u,u,u,18,128,u,u] ; AVX512F-FAST-NEXT: vpshufb %ymm15, %ymm9, %ymm4 ; AVX512F-FAST-NEXT: vpor %ymm2, %ymm4, %ymm2 ; AVX512F-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-FAST-NEXT: vmovdqa 32(%rcx), %xmm12 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm4 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX512F-FAST-NEXT: vpshufb %xmm4, %xmm12, %xmm2 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm4, %xmm25 ; AVX512F-FAST-NEXT: vmovdqa 32(%rdx), %xmm10 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm6 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX512F-FAST-NEXT: vpshufb %xmm6, %xmm10, %xmm4 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm6, %xmm26 ; AVX512F-FAST-NEXT: vpor %xmm2, %xmm4, %xmm2 ; AVX512F-FAST-NEXT: vmovdqu %ymm2, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm3[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm3 = ymm3[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u] ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm21 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm1[26],zero,ymm1[28],zero,zero,ymm1[27],zero,ymm1[29],zero,ymm1[31],zero,zero,ymm1[30],zero ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm1[21],zero,zero,ymm1[20],zero,ymm1[22],zero,ymm1[24],zero,zero,ymm1[23],zero,ymm1[25],zero,zero ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm2, %zmm1, %zmm22 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm9[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm9[27],zero,zero,ymm9[26],zero,ymm9[28],zero,ymm9[30],zero,zero,ymm9[29],zero,ymm9[31],zero,zero ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm7[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm7[19],zero,ymm7[21],zero,zero,ymm7[20],zero,ymm7[22],zero,ymm7[24],zero,zero,ymm7[23],zero ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm2, %zmm23 ; AVX512F-FAST-NEXT: vmovdqa (%rcx), %ymm4 ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm4, %ymm0 ; AVX512F-FAST-NEXT: vmovdqa (%rdx), %ymm14 ; AVX512F-FAST-NEXT: vpshufb %ymm15, %ymm14, %ymm1 ; AVX512F-FAST-NEXT: vporq %ymm0, %ymm1, %ymm24 ; AVX512F-FAST-NEXT: vmovdqa (%rsi), %ymm15 ; AVX512F-FAST-NEXT: vpshufb %ymm8, %ymm15, %ymm0 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %ymm6 ; AVX512F-FAST-NEXT: vpshufb %ymm11, %ymm6, %ymm1 ; AVX512F-FAST-NEXT: vporq %ymm0, %ymm1, %ymm20 ; AVX512F-FAST-NEXT: vmovdqa (%rdi), %xmm0 ; AVX512F-FAST-NEXT: vpshufb %xmm5, %xmm0, %xmm1 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm0, %xmm16 ; AVX512F-FAST-NEXT: vmovdqa (%rsi), %xmm3 ; AVX512F-FAST-NEXT: vpshufb %xmm13, %xmm3, %xmm2 ; AVX512F-FAST-NEXT: vporq %xmm1, %xmm2, %xmm28 ; AVX512F-FAST-NEXT: vmovdqa (%rcx), %xmm1 ; 
AVX512F-FAST-NEXT: vmovdqa64 %xmm25, %xmm0 ; AVX512F-FAST-NEXT: vpshufb %xmm0, %xmm1, %xmm8 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm1, %xmm18 ; AVX512F-FAST-NEXT: vmovdqa (%rdx), %xmm1 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm26, %xmm0 ; AVX512F-FAST-NEXT: vpshufb %xmm0, %xmm1, %xmm11 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm1, %xmm17 ; AVX512F-FAST-NEXT: vporq %xmm8, %xmm11, %xmm29 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm11 = [1,1,2,2,2,2,2,2] ; AVX512F-FAST-NEXT: vmovdqa 32(%r8), %ymm8 ; AVX512F-FAST-NEXT: vpermd %ymm8, %ymm11, %ymm11 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm25 = [255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255] ; AVX512F-FAST-NEXT: vpandnq %ymm11, %ymm25, %ymm11 ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} ymm0 = [12,128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128] ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm8, %ymm13 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm13, %zmm11, %zmm26 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm31 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31] ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm30 = [255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0,255,255,255,255,0] ; AVX512F-FAST-NEXT: vmovdqa (%r8), %ymm11 ; AVX512F-FAST-NEXT: vpermd %ymm11, %ymm31, %ymm27 ; AVX512F-FAST-NEXT: vpandnq %ymm27, %ymm30, %ymm27 ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm11, %ymm0 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm27, %zmm0, %zmm27 ; AVX512F-FAST-NEXT: vpbroadcastq {{.*#+}} ymm0 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12] ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm7, %ymm7 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm13 = ymm4[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm4[19],zero,ymm4[21],zero,zero,ymm4[20],zero,ymm4[22],zero,ymm4[24],zero,zero,ymm4[23],zero ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm4, %ymm5 ; AVX512F-FAST-NEXT: vbroadcasti128 {{.*#+}} ymm0 = [18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25] ; AVX512F-FAST-NEXT: # ymm0 = mem[0,1,0,1] ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm9, %ymm9 ; AVX512F-FAST-NEXT: vpshufb %ymm0, %ymm14, %ymm4 ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm14 = ymm14[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm14[27],zero,zero,ymm14[26],zero,ymm14[28],zero,ymm14[30],zero,zero,ymm14[29],zero,ymm14[31],zero,zero ; AVX512F-FAST-NEXT: vpunpcklbw {{.*#+}} xmm10 = xmm12[0],xmm10[0],xmm12[1],xmm10[1],xmm12[2],xmm10[2],xmm12[3],xmm10[3],xmm12[4],xmm10[4],xmm12[5],xmm10[5],xmm12[6],xmm10[6],xmm12[7],xmm10[7] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm12 = ymm15[3,u,5,u,u,4,u,6,u,8,u,u,7,u,9,u,19,u,21,u,u,20,u,22,u,24,u,u,23,u,25,u] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm15 = ymm15[11,u,u,10,u,12,u,u,u,u,13,u,15,u,u,14,27,u,u,26,u,28,u,u,u,u,29,u,31,u,u,30] ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm2 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm6[21],zero,zero,ymm6[20],zero,ymm6[22],zero,ymm6[24],zero,zero,ymm6[23],zero,ymm6[25],zero,zero ; AVX512F-FAST-NEXT: vpshufb {{.*#+}} ymm6 = ymm6[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm6[26],zero,ymm6[28],zero,zero,ymm6[27],zero,ymm6[29],zero,ymm6[31],zero,zero,ymm6[30],zero ; AVX512F-FAST-NEXT: vmovdqa64 %xmm19, %xmm1 
; AVX512F-FAST-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX512F-FAST-NEXT: vpunpcklbw {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3],xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7] ; AVX512F-FAST-NEXT: vmovdqa64 %xmm16, %xmm1 ; AVX512F-FAST-NEXT: vpunpcklbw {{.*#+}} xmm3 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512F-FAST-NEXT: vpshufb %xmm1, %xmm0, %xmm0 ; AVX512F-FAST-NEXT: vpshufb %xmm1, %xmm3, %xmm1 ; AVX512F-FAST-NEXT: vinserti32x4 $2, %xmm28, %zmm1, %zmm28 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm1 = ymm7[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm9[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm9 = ymm13[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm12 = ymm12[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm13 = ymm5[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm14 = ymm14[2,2,3,3] ; AVX512F-FAST-NEXT: vmovdqa64 %xmm18, %xmm3 ; AVX512F-FAST-NEXT: vmovdqa64 %xmm17, %xmm5 ; AVX512F-FAST-NEXT: vpunpcklbw {{.*#+}} xmm5 = xmm3[0],xmm5[0],xmm3[1],xmm5[1],xmm3[2],xmm5[2],xmm3[3],xmm5[3],xmm3[4],xmm5[4],xmm3[5],xmm5[5],xmm3[6],xmm5[6],xmm3[7],xmm5[7] ; AVX512F-FAST-NEXT: vmovdqa {{.*#+}} xmm3 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512F-FAST-NEXT: vpshufb %xmm3, %xmm10, %xmm10 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm10 = ymm10[0,0,1,1] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm6 = ymm6[2,2,3,3] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,0,1,1] ; AVX512F-FAST-NEXT: vpshufb %xmm3, %xmm5, %xmm3 ; AVX512F-FAST-NEXT: vinserti32x4 $2, %xmm29, %zmm3, %zmm3 ; AVX512F-FAST-NEXT: vmovdqa64 (%r8), %zmm5 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm11, %zmm5, %zmm11 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} zmm21 = zmm21[2,2,3,3,6,6,7,7] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7] ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm29 = [18374966859431608575,18374966859431608575,18446463693966278400,18446463693966278400,72056498804555775,72056498804555775,18374967950370078975,18374967950370078975] ; AVX512F-FAST-NEXT: vpternlogq $248, %zmm29, %zmm21, %zmm22 ; AVX512F-FAST-NEXT: vpandq %ymm29, %ymm1, %ymm1 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm7, %zmm1 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} zmm7 = zmm23[2,2,3,3,6,6,7,7] ; AVX512F-FAST-NEXT: vporq %zmm7, %zmm1, %zmm1 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255] ; AVX512F-FAST-NEXT: vpternlogq $226, %zmm22, %zmm7, %zmm1 ; AVX512F-FAST-NEXT: vpermt2d %zmm5, %zmm31, %zmm8 ; AVX512F-FAST-NEXT: vpternlogd $184, %zmm1, %zmm30, %zmm8 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm8, 256(%r9) ; AVX512F-FAST-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm1 # 32-byte Folded Reload ; AVX512F-FAST-NEXT: # ymm1 = mem[0,0,1,1] ; AVX512F-FAST-NEXT: vinserti64x4 $1, {{[-0-9]+}}(%r{{[sb]}}p), %zmm1, %zmm1 # 32-byte Folded Reload ; AVX512F-FAST-NEXT: vpermq $80, {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload ; AVX512F-FAST-NEXT: # ymm8 = mem[0,0,1,1] ; AVX512F-FAST-NEXT: vinserti64x4 $1, 
{{[-0-9]+}}(%r{{[sb]}}p), %zmm8, %zmm8 # 32-byte Folded Reload ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm16 = [255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0,0,255,255,255,0] ; AVX512F-FAST-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm8 ; AVX512F-FAST-NEXT: vpternlogq $248, %zmm25, %zmm8, %zmm26 ; AVX512F-FAST-NEXT: vpor %ymm4, %ymm9, %ymm1 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm1, %zmm24, %zmm1 ; AVX512F-FAST-NEXT: vpternlogq $248, %ymm29, %ymm12, %ymm2 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm2, %zmm20, %zmm2 ; AVX512F-FAST-NEXT: vpternlogq $226, %zmm1, %zmm16, %zmm2 ; AVX512F-FAST-NEXT: vpternlogq $248, %ymm29, %ymm13, %ymm14 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm10, %zmm14, %zmm1 ; AVX512F-FAST-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm15, %ymm6 ; AVX512F-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm6, %zmm0 ; AVX512F-FAST-NEXT: vpternlogq $226, %zmm1, %zmm7, %zmm0 ; AVX512F-FAST-NEXT: vpternlogq $248, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm27 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = [6,6,6,u,7,7,7,7,u,8,8,8,8,u,9,9] ; AVX512F-FAST-NEXT: vpermd %zmm5, %zmm1, %zmm1 ; AVX512F-FAST-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm1 ; AVX512F-FAST-NEXT: vpermq {{.*#+}} zmm0 = zmm28[0,0,1,1,4,4,5,5] ; AVX512F-FAST-NEXT: vpermq {{.*#+}} zmm2 = zmm3[0,0,1,1,4,4,5,5] ; AVX512F-FAST-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm0, %zmm2 ; AVX512F-FAST-NEXT: vmovdqa64 {{.*#+}} zmm0 = [u,0,0,0,0,u,1,1,9,9,u,10,10,10,10,u] ; AVX512F-FAST-NEXT: vpermd %zmm11, %zmm0, %zmm0 ; AVX512F-FAST-NEXT: vpternlogd $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm2, %zmm0 ; AVX512F-FAST-NEXT: vmovdqa64 %zmm27, 64(%r9) ; AVX512F-FAST-NEXT: vmovdqa64 %zmm0, (%r9) ; AVX512F-FAST-NEXT: vmovdqa64 %zmm1, 128(%r9) ; AVX512F-FAST-NEXT: vmovdqa64 %zmm26, 192(%r9) ; AVX512F-FAST-NEXT: addq $24, %rsp ; AVX512F-FAST-NEXT: vzeroupper ; AVX512F-FAST-NEXT: retq ; ; AVX512BW-SLOW-LABEL: store_i8_stride5_vf64: ; AVX512BW-SLOW: # %bb.0: ; AVX512BW-SLOW-NEXT: vmovdqa64 (%r8), %zmm3 ; AVX512BW-SLOW-NEXT: vmovdqa (%rcx), %ymm0 ; AVX512BW-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm8 = [9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12,9,14,11,0,13,10,15,12] ; AVX512BW-SLOW-NEXT: vpshufb %ymm8, %ymm0, %ymm2 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdx), %ymm1 ; AVX512BW-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm1[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14] ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm4 = ymm4[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: movl $693250386, %eax # imm = 0x29522952 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm4, %ymm2 {%k1} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512BW-SLOW-NEXT: vmovdqa 32(%rdx), %xmm6 ; AVX512BW-SLOW-NEXT: vmovdqa 32(%rcx), %xmm12 ; AVX512BW-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm4 = xmm12[0],xmm6[0],xmm12[1],xmm6[1],xmm12[2],xmm6[2],xmm12[3],xmm6[3],xmm12[4],xmm6[4],xmm12[5],xmm6[5],xmm12[6],xmm6[6],xmm12[7],xmm6[7] ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} xmm7 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512BW-SLOW-NEXT: vpshufb %xmm7, %xmm4, %xmm4 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,0,1,1] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm4, %zmm2, %zmm10 ; AVX512BW-SLOW-NEXT: vmovdqa (%rsi), %ymm4 ; AVX512BW-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm15 = [11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14,11,0,13,10,15,12,0,14] ; AVX512BW-SLOW-NEXT: 
vpshufb %ymm15, %ymm4, %ymm2 ; AVX512BW-SLOW-NEXT: vmovdqa (%rdi), %ymm5 ; AVX512BW-SLOW-NEXT: vpshufhw {{.*#+}} ymm9 = ymm5[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15] ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: movl $1251232404, %eax # imm = 0x4A944A94 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k5 ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm9, %ymm2 {%k5} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512BW-SLOW-NEXT: vmovdqa 32(%rsi), %xmm13 ; AVX512BW-SLOW-NEXT: vmovdqa 32(%rdi), %xmm14 ; AVX512BW-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm11 = xmm14[0],xmm13[0],xmm14[1],xmm13[1],xmm14[2],xmm13[2],xmm14[3],xmm13[3],xmm14[4],xmm13[4],xmm14[5],xmm13[5],xmm14[6],xmm13[6],xmm14[7],xmm13[7] ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} xmm9 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; AVX512BW-SLOW-NEXT: vpshufb %xmm9, %xmm11, %xmm11 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm11 = ymm11[0,0,1,1] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm11, %zmm2, %zmm2 ; AVX512BW-SLOW-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k4 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k4} ; AVX512BW-SLOW-NEXT: vmovdqa64 32(%r8), %ymm16 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm10 = [6,6,6,6,7,7,7,7,16,16,16,16,16,16,17,17] ; AVX512BW-SLOW-NEXT: vpermi2d %zmm16, %zmm3, %zmm10 ; AVX512BW-SLOW-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k2 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm10, %zmm2 {%k2} ; AVX512BW-SLOW-NEXT: vmovdqa64 32(%rdx), %ymm23 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm10 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128] ; AVX512BW-SLOW-NEXT: vpshufb %ymm10, %ymm23, %ymm17 ; AVX512BW-SLOW-NEXT: vmovdqa64 32(%rcx), %ymm24 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm11 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX512BW-SLOW-NEXT: vpshufb %ymm11, %ymm24, %ymm18 ; AVX512BW-SLOW-NEXT: vporq %ymm17, %ymm18, %ymm17 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} xmm20 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX512BW-SLOW-NEXT: vpshufb %xmm20, %xmm12, %xmm12 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} xmm22 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX512BW-SLOW-NEXT: vpshufb %xmm22, %xmm6, %xmm6 ; AVX512BW-SLOW-NEXT: vpor %xmm6, %xmm12, %xmm6 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm6 = ymm6[0,0,1,1] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm17, %zmm6, %zmm6 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} xmm19 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX512BW-SLOW-NEXT: vpshufb %xmm19, %xmm14, %xmm12 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} xmm21 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX512BW-SLOW-NEXT: vpshufb %xmm21, %xmm13, %xmm13 ; AVX512BW-SLOW-NEXT: vpor %xmm12, %xmm13, %xmm12 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm13 = ymm12[0,0,1,1] ; AVX512BW-SLOW-NEXT: vmovdqa64 32(%rdi), %ymm25 ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = [3,3,3,u,4,4,4,4] ; AVX512BW-SLOW-NEXT: vpermd %ymm25, %ymm12, %ymm17 ; AVX512BW-SLOW-NEXT: vmovdqa64 32(%rsi), %ymm26 ; AVX512BW-SLOW-NEXT: vpbroadcastq {{.*#+}} ymm14 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14] ; AVX512BW-SLOW-NEXT: movl $138547332, %eax # imm = 0x8421084 ; AVX512BW-SLOW-NEXT: kmovd %eax, %k3 ; AVX512BW-SLOW-NEXT: vpshufb %ymm14, %ymm26, %ymm17 {%k3} ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm17, 
%zmm13, %zmm13 ; AVX512BW-SLOW-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k2 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm13, %zmm6 {%k2} ; AVX512BW-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = [3,3,3,3,u,4,4,4] ; AVX512BW-SLOW-NEXT: vpermd %ymm16, %ymm13, %ymm17 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} xmm18 = mem[1,1,2,2] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,1,1,1] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm17, %zmm18, %zmm17 ; AVX512BW-SLOW-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k6 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm17, %zmm6 {%k6} ; AVX512BW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} ymm17 = [19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128] ; AVX512BW-SLOW-NEXT: # ymm17 = mem[0,1,2,3,0,1,2,3] ; AVX512BW-SLOW-NEXT: vpshufb %ymm17, %ymm26, %ymm18 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm27 = ymm18[2,2,3,3] ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm18 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128] ; AVX512BW-SLOW-NEXT: vpshufb %ymm18, %ymm25, %ymm28 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3] ; AVX512BW-SLOW-NEXT: vporq %ymm27, %ymm28, %ymm27 ; AVX512BW-SLOW-NEXT: vpshufb %ymm15, %ymm26, %ymm15 ; AVX512BW-SLOW-NEXT: vpshufhw {{.*#+}} ymm25 = ymm25[0,1,2,3,6,5,6,7,8,9,10,11,14,13,14,15] ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm25 = ymm25[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm25, %ymm15 {%k5} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm15 = ymm15[2,2,3,3] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm15, %zmm27, %zmm15 ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} ymm25 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25] ; AVX512BW-SLOW-NEXT: vpshufb %ymm25, %ymm23, %ymm26 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3] ; AVX512BW-SLOW-NEXT: vbroadcasti32x4 {{.*#+}} ymm27 = [128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128] ; AVX512BW-SLOW-NEXT: # ymm27 = mem[0,1,2,3,0,1,2,3] ; AVX512BW-SLOW-NEXT: vpshufb %ymm27, %ymm24, %ymm28 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3] ; AVX512BW-SLOW-NEXT: vporq %ymm26, %ymm28, %ymm26 ; AVX512BW-SLOW-NEXT: vpshufb %ymm8, %ymm24, %ymm8 ; AVX512BW-SLOW-NEXT: vpshufhw {{.*#+}} ymm23 = ymm23[0,1,2,3,5,6,7,6,8,9,10,11,13,14,15,14] ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm23 = ymm23[2,2,3,3,6,6,7,7] ; AVX512BW-SLOW-NEXT: vmovdqu8 %ymm23, %ymm8 {%k1} ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm8, %zmm26, %zmm8 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm15, %zmm8 {%k4} ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm15 = [4,6,5,5,5,5,4,6,6,6,6,6,7,7,7,7] ; AVX512BW-SLOW-NEXT: vpermd %zmm16, %zmm15, %zmm15 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdx), %xmm16 ; AVX512BW-SLOW-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm15, %zmm8 {%k1} ; AVX512BW-SLOW-NEXT: vmovdqa (%rcx), %xmm15 ; AVX512BW-SLOW-NEXT: vpshufb %xmm20, %xmm15, %xmm20 ; AVX512BW-SLOW-NEXT: vpshufb %xmm22, %xmm16, %xmm22 ; AVX512BW-SLOW-NEXT: vporq %xmm20, %xmm22, %xmm20 ; AVX512BW-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm15 = 
xmm15[0],xmm16[0],xmm15[1],xmm16[1],xmm15[2],xmm16[2],xmm15[3],xmm16[3],xmm15[4],xmm16[4],xmm15[5],xmm16[5],xmm15[6],xmm16[6],xmm15[7],xmm16[7] ; AVX512BW-SLOW-NEXT: vpshufb %xmm7, %xmm15, %xmm7 ; AVX512BW-SLOW-NEXT: vmovdqa (%rsi), %xmm15 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm20, %zmm7, %zmm7 ; AVX512BW-SLOW-NEXT: vmovdqa64 (%rdi), %xmm16 ; AVX512BW-SLOW-NEXT: vpshufb %xmm19, %xmm16, %xmm19 ; AVX512BW-SLOW-NEXT: vpshufb %xmm21, %xmm15, %xmm20 ; AVX512BW-SLOW-NEXT: vporq %xmm19, %xmm20, %xmm19 ; AVX512BW-SLOW-NEXT: vpunpcklbw {{.*#+}} xmm15 = xmm16[0],xmm15[0],xmm16[1],xmm15[1],xmm16[2],xmm15[2],xmm16[3],xmm15[3],xmm16[4],xmm15[4],xmm16[5],xmm15[5],xmm16[6],xmm15[6],xmm16[7],xmm15[7] ; AVX512BW-SLOW-NEXT: vpshufb %xmm9, %xmm15, %xmm9 ; AVX512BW-SLOW-NEXT: vinserti32x4 $2, %xmm19, %zmm9, %zmm9 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} zmm7 = zmm7[0,0,1,1,4,4,5,5] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} zmm9 = zmm9[0,0,1,1,4,4,5,5] ; AVX512BW-SLOW-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm7, %zmm9 {%k1} ; AVX512BW-SLOW-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,1,1,2,2,2,2,2,2] ; AVX512BW-SLOW-NEXT: vpermd %zmm3, %zmm7, %zmm3 ; AVX512BW-SLOW-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm3, %zmm9 {%k1} ; AVX512BW-SLOW-NEXT: vpshufb %ymm25, %ymm1, %ymm3 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpshufb %ymm27, %ymm0, %ymm7 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpor %ymm3, %ymm7, %ymm3 ; AVX512BW-SLOW-NEXT: vpshufb %ymm10, %ymm1, %ymm1 ; AVX512BW-SLOW-NEXT: vpshufb %ymm11, %ymm0, %ymm0 ; AVX512BW-SLOW-NEXT: vpor %ymm1, %ymm0, %ymm0 ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm0, %zmm0 ; AVX512BW-SLOW-NEXT: vpermd %ymm5, %ymm12, %ymm1 ; AVX512BW-SLOW-NEXT: vpshufb %ymm14, %ymm4, %ymm1 {%k3} ; AVX512BW-SLOW-NEXT: vpshufb %ymm17, %ymm4, %ymm3 ; AVX512BW-SLOW-NEXT: vpshufb %ymm18, %ymm5, %ymm4 ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[2,2,3,3] ; AVX512BW-SLOW-NEXT: vpor %ymm3, %ymm4, %ymm3 ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm3, %zmm1, %zmm1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k2} ; AVX512BW-SLOW-NEXT: vmovdqa (%r8), %ymm0 ; AVX512BW-SLOW-NEXT: vpermd %ymm0, %ymm13, %ymm3 ; AVX512BW-SLOW-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,1,1,4,6,5,5] ; AVX512BW-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[2,3,3,2] ; AVX512BW-SLOW-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0 ; AVX512BW-SLOW-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421 ; AVX512BW-SLOW-NEXT: kmovq %rax, %k1 ; AVX512BW-SLOW-NEXT: vmovdqu8 %zmm0, %zmm1 {%k1} ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm1, 64(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm9, (%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm8, 256(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm6, 192(%r9) ; AVX512BW-SLOW-NEXT: vmovdqa64 %zmm2, 128(%r9) ; AVX512BW-SLOW-NEXT: vzeroupper ; AVX512BW-SLOW-NEXT: retq ; ; AVX512BW-FAST-LABEL: store_i8_stride5_vf64: ; AVX512BW-FAST: # %bb.0: ; AVX512BW-FAST-NEXT: vmovdqa64 (%r8), %zmm5 ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rdx), %ymm21 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm1 = [128,128,12,13,128,128,128,128,14,128,128,128,14,15,128,128,128,128,16,128,128,128,16,17,128,128,128,128,18,128,128,128] ; AVX512BW-FAST-NEXT: vpshufb %ymm1, %ymm21, %ymm0 ; AVX512BW-FAST-NEXT: vmovdqa 32(%rcx), %ymm13 
; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [128,128,128,128,13,128,128,128,128,14,128,128,128,128,15,128,128,128,128,16,128,128,128,128,17,128,128,128,128,18,128,128] ; AVX512BW-FAST-NEXT: vpshufb %ymm2, %ymm13, %ymm3 ; AVX512BW-FAST-NEXT: vpor %ymm0, %ymm3, %ymm0 ; AVX512BW-FAST-NEXT: vmovdqa (%rcx), %xmm6 ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rcx), %xmm18 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm8 = [128,6,128,8,u,128,7,128,9,128,11,u,128,10,128,12] ; AVX512BW-FAST-NEXT: vpshufb %xmm8, %xmm18, %xmm3 ; AVX512BW-FAST-NEXT: vmovdqa (%rdx), %xmm7 ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rdx), %xmm20 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm10 = [6,128,8,128,u,7,128,9,128,11,128,u,10,128,12,128] ; AVX512BW-FAST-NEXT: vpshufb %xmm10, %xmm20, %xmm4 ; AVX512BW-FAST-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,0,1,1] ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm0, %zmm3, %zmm0 ; AVX512BW-FAST-NEXT: vmovdqa (%rdi), %xmm9 ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rdi), %xmm17 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm12 = [8,128,u,7,128,9,128,u,128,u,10,128,12,128,u,11] ; AVX512BW-FAST-NEXT: vpshufb %xmm12, %xmm17, %xmm3 ; AVX512BW-FAST-NEXT: vmovdqa (%rsi), %xmm11 ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rsi), %xmm19 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} xmm14 = [128,8,u,128,7,128,9,u,11,u,128,10,128,12,u,128] ; AVX512BW-FAST-NEXT: vpshufb %xmm14, %xmm19, %xmm4 ; AVX512BW-FAST-NEXT: vpor %xmm3, %xmm4, %xmm3 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm15 = ymm3[0,0,1,1] ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rdi), %ymm16 ; AVX512BW-FAST-NEXT: vmovdqa {{.*#+}} ymm3 = [3,3,3,u,4,4,4,4] ; AVX512BW-FAST-NEXT: vpermd %ymm16, %ymm3, %ymm22 ; AVX512BW-FAST-NEXT: vmovdqa64 32(%rsi), %ymm23 ; AVX512BW-FAST-NEXT: vpbroadcastq {{.*#+}} ymm4 = [0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14,0,0,13,2,15,0,1,14] ; AVX512BW-FAST-NEXT: movl $138547332, %eax # imm = 0x8421084 ; AVX512BW-FAST-NEXT: kmovd %eax, %k1 ; AVX512BW-FAST-NEXT: vpshufb %ymm4, %ymm23, %ymm22 {%k1} ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm22, %zmm15, %zmm15 ; AVX512BW-FAST-NEXT: movabsq $-8330787646191410408, %rax # imm = 0x8C6318C6318C6318 ; AVX512BW-FAST-NEXT: kmovq %rax, %k2 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm15, %zmm0 {%k2} ; AVX512BW-FAST-NEXT: vmovdqa64 32(%r8), %ymm24 ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm15 = [1,1,2,2,2,2,2,2,27,27,27,27,u,28,28,28] ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm24, %zmm15 ; AVX512BW-FAST-NEXT: movabsq $4760450083537948804, %rax # imm = 0x4210842108421084 ; AVX512BW-FAST-NEXT: kmovq %rax, %k3 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm15, %zmm0 {%k3} ; AVX512BW-FAST-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm23[0,1,2,3],mem[4,5,6,7] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm15 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,19,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27,128,128,26,128,28,128,128,128,128,29,128,31,128,128,30] ; AVX512BW-FAST-NEXT: vpshufb %zmm15, %zmm22, %zmm22 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm22 = zmm22[2,2,3,3,6,6,7,7] ; AVX512BW-FAST-NEXT: vshufi64x2 {{.*#+}} zmm23 = zmm16[0,1,2,3],mem[4,5,6,7] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm16 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,21,128,128,20,128,22,128,24,128,128,23,128,25,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,26,128,28,128,128,27,128,29,128,31,128,128,30,128] ; AVX512BW-FAST-NEXT: vpshufb %zmm16, %zmm23, %zmm23 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm23 = zmm23[2,2,3,3,6,6,7,7] ; AVX512BW-FAST-NEXT: 
vporq %zmm22, %zmm23, %zmm23 ; AVX512BW-FAST-NEXT: vshufi64x2 {{.*#+}} zmm22 = zmm21[0,1,2,3],mem[4,5,6,7] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm21 = [128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,18,19,128,21,128,21,20,128,22,128,24,128,22,23,128,25,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128] ; AVX512BW-FAST-NEXT: vpshufb %zmm21, %zmm22, %zmm22 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm25 = zmm22[2,2,3,3,6,6,7,7] ; AVX512BW-FAST-NEXT: vshufi64x2 {{.*#+}} zmm13 = zmm13[0,1,2,3],mem[4,5,6,7] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm22 = [u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,128,128,19,128,21,128,128,20,128,22,128,24,128,128,23,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,128,27,128,128,26,128,28,128,30,128,128,29,128,31,128,128] ; AVX512BW-FAST-NEXT: vpshufb %zmm22, %zmm13, %zmm13 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm13 = zmm13[2,2,3,3,6,6,7,7] ; AVX512BW-FAST-NEXT: vporq %zmm25, %zmm13, %zmm13 ; AVX512BW-FAST-NEXT: movabsq $1785168781326730801, %rax # imm = 0x18C6318C6318C631 ; AVX512BW-FAST-NEXT: kmovq %rax, %k3 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm23, %zmm13 {%k3} ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm23 = [4,6,5,5,5,5,4,6,30,30,30,30,31,31,31,31] ; AVX512BW-FAST-NEXT: vpermi2d %zmm5, %zmm24, %zmm23 ; AVX512BW-FAST-NEXT: movabsq $-8925843906633654008, %rax # imm = 0x8421084210842108 ; AVX512BW-FAST-NEXT: kmovq %rax, %k4 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm23, %zmm13 {%k4} ; AVX512BW-FAST-NEXT: vmovdqa64 (%rcx), %ymm23 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm24 = ymm23[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,25],zero,ymm23[27],zero,zero,ymm23[26],zero,ymm23[28],zero,ymm23[30],zero,zero,ymm23[29],zero,ymm23[31],zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm25 = ymm24[2,2,3,3] ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdx), %ymm24 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm26 = ymm24[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,ymm24[27],zero,zero,ymm24[26],zero,ymm24[28],zero,ymm24[30],zero,zero,ymm24[29],zero,ymm24[31],zero,zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm26 = ymm26[2,2,3,3] ; AVX512BW-FAST-NEXT: vporq %ymm25, %ymm26, %ymm25 ; AVX512BW-FAST-NEXT: vpunpcklbw {{.*#+}} xmm18 = xmm18[0],xmm20[0],xmm18[1],xmm20[1],xmm18[2],xmm20[2],xmm18[3],xmm20[3],xmm18[4],xmm20[4],xmm18[5],xmm20[5],xmm18[6],xmm20[6],xmm18[7],xmm20[7] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} xmm26 = [2,u,1,0,5,4,u,3,u,7,6,11,10,u,9,8] ; AVX512BW-FAST-NEXT: vpshufb %xmm26, %xmm18, %xmm18 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm18 = ymm18[0,0,1,1] ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm18, %zmm25, %zmm25 ; AVX512BW-FAST-NEXT: vmovdqa64 (%rsi), %ymm18 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm20 = ymm18[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,27],zero,zero,ymm18[26],zero,ymm18[28],zero,zero,zero,zero,ymm18[29],zero,ymm18[31],zero,zero,ymm18[30] ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm27 = ymm20[2,2,3,3] ; AVX512BW-FAST-NEXT: vmovdqa64 (%rdi), %ymm20 ; AVX512BW-FAST-NEXT: vpshufb {{.*#+}} ymm28 = ymm20[u,u,u,u,u,u,u,u,u,u,u,u,u,u,u,u],zero,zero,ymm20[26],zero,ymm20[28],zero,zero,ymm20[27],zero,ymm20[29],zero,ymm20[31],zero,zero,ymm20[30],zero ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm28 = ymm28[2,2,3,3] ; AVX512BW-FAST-NEXT: vporq %ymm27, %ymm28, %ymm27 ; AVX512BW-FAST-NEXT: vpunpcklbw {{.*#+}} xmm17 = xmm17[0],xmm19[0],xmm17[1],xmm19[1],xmm17[2],xmm19[2],xmm17[3],xmm19[3],xmm17[4],xmm19[4],xmm17[5],xmm19[5],xmm17[6],xmm19[6],xmm17[7],xmm19[7] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} xmm19 = [0,1,4,5,u,2,3,6,7,10,11,u,8,9,12,13] ; 
AVX512BW-FAST-NEXT: vpshufb %xmm19, %xmm17, %xmm17 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm17 = ymm17[0,0,1,1] ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm17, %zmm27, %zmm17 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm25, %zmm17 {%k3} ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm25 = [6,6,6,6,7,7,7,7,8,8,8,8,8,8,9,9] ; AVX512BW-FAST-NEXT: vpermd %zmm5, %zmm25, %zmm5 ; AVX512BW-FAST-NEXT: movabsq $2380225041768974402, %rax # imm = 0x2108421084210842 ; AVX512BW-FAST-NEXT: kmovq %rax, %k3 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm5, %zmm17 {%k3} ; AVX512BW-FAST-NEXT: vpshufb %xmm8, %xmm6, %xmm5 ; AVX512BW-FAST-NEXT: vpshufb %xmm10, %xmm7, %xmm8 ; AVX512BW-FAST-NEXT: vpor %xmm5, %xmm8, %xmm5 ; AVX512BW-FAST-NEXT: vpunpcklbw {{.*#+}} xmm6 = xmm6[0],xmm7[0],xmm6[1],xmm7[1],xmm6[2],xmm7[2],xmm6[3],xmm7[3],xmm6[4],xmm7[4],xmm6[5],xmm7[5],xmm6[6],xmm7[6],xmm6[7],xmm7[7] ; AVX512BW-FAST-NEXT: vpshufb %xmm26, %xmm6, %xmm6 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm5, %zmm6, %zmm5 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm5 = zmm5[0,0,1,1,4,4,5,5] ; AVX512BW-FAST-NEXT: vpshufb %xmm12, %xmm9, %xmm6 ; AVX512BW-FAST-NEXT: vpshufb %xmm14, %xmm11, %xmm7 ; AVX512BW-FAST-NEXT: vpor %xmm6, %xmm7, %xmm6 ; AVX512BW-FAST-NEXT: vpunpcklbw {{.*#+}} xmm7 = xmm9[0],xmm11[0],xmm9[1],xmm11[1],xmm9[2],xmm11[2],xmm9[3],xmm11[3],xmm9[4],xmm11[4],xmm9[5],xmm11[5],xmm9[6],xmm11[6],xmm9[7],xmm11[7] ; AVX512BW-FAST-NEXT: vpshufb %xmm19, %xmm7, %xmm7 ; AVX512BW-FAST-NEXT: vinserti32x4 $2, %xmm6, %zmm7, %zmm6 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} zmm6 = zmm6[0,0,1,1,4,4,5,5] ; AVX512BW-FAST-NEXT: movabsq $-4165393823095705204, %rax # imm = 0xC6318C6318C6318C ; AVX512BW-FAST-NEXT: kmovq %rax, %k3 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm5, %zmm6 {%k3} ; AVX512BW-FAST-NEXT: vbroadcasti64x4 {{.*#+}} zmm5 = mem[0,1,2,3,0,1,2,3] ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm7 = [0,0,0,0,0,0,1,1,9,9,10,10,10,10,10,10] ; AVX512BW-FAST-NEXT: vpermd %zmm5, %zmm7, %zmm7 ; AVX512BW-FAST-NEXT: movabsq $595056260442243600, %rax # imm = 0x842108421084210 ; AVX512BW-FAST-NEXT: kmovq %rax, %k3 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm7, %zmm6 {%k3} ; AVX512BW-FAST-NEXT: vpshufb %ymm21, %ymm24, %ymm7 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-FAST-NEXT: vpshufb %ymm22, %ymm23, %ymm8 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm8 = ymm8[2,2,3,3] ; AVX512BW-FAST-NEXT: vpor %ymm7, %ymm8, %ymm7 ; AVX512BW-FAST-NEXT: vpshufb %ymm1, %ymm24, %ymm1 ; AVX512BW-FAST-NEXT: vpshufb %ymm2, %ymm23, %ymm2 ; AVX512BW-FAST-NEXT: vpor %ymm1, %ymm2, %ymm1 ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm7, %zmm1, %zmm1 ; AVX512BW-FAST-NEXT: vpshufb %ymm15, %ymm18, %ymm2 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[2,2,3,3] ; AVX512BW-FAST-NEXT: vpshufb %ymm16, %ymm20, %ymm7 ; AVX512BW-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[2,2,3,3] ; AVX512BW-FAST-NEXT: vpor %ymm2, %ymm7, %ymm2 ; AVX512BW-FAST-NEXT: vpermd %ymm20, %ymm3, %ymm3 ; AVX512BW-FAST-NEXT: vpshufb %ymm4, %ymm18, %ymm3 {%k1} ; AVX512BW-FAST-NEXT: vinserti64x4 $1, %ymm2, %zmm3, %zmm2 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm1, %zmm2 {%k2} ; AVX512BW-FAST-NEXT: vmovdqa64 {{.*#+}} zmm1 = [3,3,3,3,u,4,4,4,12,14,13,13,13,13,12,14] ; AVX512BW-FAST-NEXT: vpermd %zmm5, %zmm1, %zmm1 ; AVX512BW-FAST-NEXT: movabsq $1190112520884487201, %rax # imm = 0x1084210842108421 ; AVX512BW-FAST-NEXT: kmovq %rax, %k1 ; AVX512BW-FAST-NEXT: vmovdqu8 %zmm1, %zmm2 {%k1} ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm2, 64(%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm6, (%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm17, 128(%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm13, 
256(%r9) ; AVX512BW-FAST-NEXT: vmovdqa64 %zmm0, 192(%r9) ; AVX512BW-FAST-NEXT: vzeroupper ; AVX512BW-FAST-NEXT: retq %in.vec0 = load <64 x i8>, ptr %in.vecptr0, align 64 %in.vec1 = load <64 x i8>, ptr %in.vecptr1, align 64 %in.vec2 = load <64 x i8>, ptr %in.vecptr2, align 64 %in.vec3 = load <64 x i8>, ptr %in.vecptr3, align 64 %in.vec4 = load <64 x i8>, ptr %in.vecptr4, align 64 %1 = shufflevector <64 x i8> %in.vec0, <64 x i8> %in.vec1, <128 x i32> %2 = shufflevector <64 x i8> %in.vec2, <64 x i8> %in.vec3, <128 x i32> %3 = shufflevector <128 x i8> %1, <128 x i8> %2, <256 x i32> %4 = shufflevector <64 x i8> %in.vec4, <64 x i8> poison, <256 x i32> %5 = shufflevector <256 x i8> %3, <256 x i8> %4, <320 x i32> %interleaved.vec = shufflevector <320 x i8> %5, <320 x i8> poison, <320 x i32> store <320 x i8> %interleaved.vec, ptr %out.vec, align 64 ret void } ;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: ; AVX1: {{.*}} ; AVX2: {{.*}} ; AVX512: {{.*}} ; AVX512-FAST: {{.*}} ; AVX512-SLOW: {{.*}} ; AVX512BW-ONLY: {{.*}} ; AVX512BW-ONLY-FAST: {{.*}} ; AVX512BW-ONLY-SLOW: {{.*}} ; AVX512DQ-FAST: {{.*}} ; AVX512DQ-ONLY: {{.*}} ; AVX512DQ-SLOW: {{.*}} ; AVX512DQBW-FAST: {{.*}} ; AVX512DQBW-ONLY: {{.*}} ; AVX512DQBW-SLOW: {{.*}} ; AVX512F-ONLY: {{.*}} ; AVX512F-ONLY-FAST: {{.*}} ; AVX512F-ONLY-SLOW: {{.*}} ; FALLBACK0: {{.*}} ; FALLBACK1: {{.*}} ; FALLBACK10: {{.*}} ; FALLBACK11: {{.*}} ; FALLBACK12: {{.*}} ; FALLBACK2: {{.*}} ; FALLBACK3: {{.*}} ; FALLBACK4: {{.*}} ; FALLBACK5: {{.*}} ; FALLBACK6: {{.*}} ; FALLBACK7: {{.*}} ; FALLBACK8: {{.*}} ; FALLBACK9: {{.*}}