; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE,FALLBACK0
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1,AVX1-ONLY,FALLBACK1
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-SLOW,FALLBACK2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST,FALLBACK3
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX1,AVX2,AVX2-ONLY,AVX2-FAST-PERLANE,FALLBACK4
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512F-ONLY-SLOW,FALLBACK5
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512F-ONLY,AVX512-FAST,AVX512F-FAST,AVX512F-ONLY-FAST,FALLBACK6
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-SLOW,AVX512F-SLOW,AVX512DQ-SLOW,FALLBACK7
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512F,AVX512DQ-ONLY,AVX512-FAST,AVX512F-FAST,AVX512DQ-FAST,FALLBACK8
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512BW-ONLY-SLOW,FALLBACK9
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512BW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512BW-ONLY-FAST,FALLBACK10
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-SLOW,AVX512BW-SLOW,AVX512DQBW-SLOW,FALLBACK11
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512vl,+avx512dq,+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX,AVX2,AVX512,AVX512BW,AVX512DQBW-ONLY,AVX512-FAST,AVX512BW-FAST,AVX512DQBW-FAST,FALLBACK12

; These patterns are produced by LoopVectorizer for interleaved loads.
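;
; For context, the IR bodies below are the kind of output the LoopVectorizer
; typically produces for a scalar de-interleaving loop. A minimal C sketch of
; such a loop is shown here purely for illustration; the function and
; parameter names are hypothetical and are not part of this test:
;
;   /* Split a packed stride-2 i16 stream into its even and odd elements. */
;   void load_i16_stride2(const short *in, short *out0, short *out1, int n) {
;     for (int i = 0; i < n; ++i) {
;       out0[i] = in[2 * i];     /* even lanes -> %out.vec0 */
;       out1[i] = in[2 * i + 1]; /* odd lanes  -> %out.vec1 */
;     }
;   }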
define void @load_i16_stride2_vf2(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i16_stride2_vf2:
; SSE: # %bb.0:
; SSE-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; SSE-NEXT: movd %xmm1, (%rsi)
; SSE-NEXT: movd %xmm0, (%rdx)
; SSE-NEXT: retq
;
; AVX-LABEL: load_i16_stride2_vf2:
; AVX: # %bb.0:
; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT: vpshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; AVX-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[1,3,2,3,4,5,6,7]
; AVX-NEXT: vmovd %xmm1, (%rsi)
; AVX-NEXT: vmovd %xmm0, (%rdx)
; AVX-NEXT: retq
  %wide.vec = load <4 x i16>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 0, i32 2>
  %strided.vec1 = shufflevector <4 x i16> %wide.vec, <4 x i16> poison, <2 x i32> <i32 1, i32 3>
  store <2 x i16> %strided.vec0, ptr %out.vec0, align 64
  store <2 x i16> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

define void @load_i16_stride2_vf4(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i16_stride2_vf4:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm0[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,0,3,2,4,5,6,7]
; SSE-NEXT: movq %xmm1, (%rsi)
; SSE-NEXT: movq %xmm0, (%rdx)
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i16_stride2_vf4:
; AVX1: # %bb.0:
; AVX1-NEXT: vmovdqa (%rdi), %xmm0
; AVX1-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
; AVX1-NEXT: vmovq %xmm1, (%rsi)
; AVX1-NEXT: vmovq %xmm0, (%rdx)
; AVX1-NEXT: retq
;
; AVX512-LABEL: load_i16_stride2_vf4:
; AVX512: # %bb.0:
; AVX512-NEXT: vmovdqa (%rdi), %xmm0
; AVX512-NEXT: vpshufb {{.*#+}} xmm1 = xmm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u]
; AVX512-NEXT: vpmovdw %xmm0, (%rsi)
; AVX512-NEXT: vmovq %xmm1, (%rdx)
; AVX512-NEXT: retq
  %wide.vec = load <8 x i16>, ptr %in.vec, align 64
  %strided.vec0 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
  %strided.vec1 = shufflevector <8 x i16> %wide.vec, <8 x i16> poison, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
  store <4 x i16> %strided.vec0, ptr %out.vec0, align 64
  store <4 x i16> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

define void @load_i16_stride2_vf8(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind {
; SSE-LABEL: load_i16_stride2_vf8:
; SSE: # %bb.0:
; SSE-NEXT: movdqa (%rdi), %xmm0
; SSE-NEXT: movdqa 16(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pslld $16, %xmm2
; SSE-NEXT: psrad $16, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pslld $16, %xmm3
; SSE-NEXT: psrad $16, %xmm3
; SSE-NEXT: packssdw %xmm2, %xmm3
; SSE-NEXT: psrad $16, %xmm1
; SSE-NEXT: psrad $16, %xmm0
; SSE-NEXT: packssdw %xmm1, %xmm0
; SSE-NEXT: movdqa %xmm3, (%rsi)
; SSE-NEXT: movdqa %xmm0, (%rdx)
; SSE-NEXT: retq
;
; AVX1-LABEL: load_i16_stride2_vf8:
; AVX1: # %bb.0:
; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
; AVX1-NEXT: vmovdqa (%rdi), %xmm1
; AVX1-NEXT: vmovdqa 16(%rdi), %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm3 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; AVX1-NEXT:
vpackusdw %xmm3, %xmm0, %xmm0 ; AVX1-NEXT: vpsrld $16, %xmm2, %xmm2 ; AVX1-NEXT: vpsrld $16, %xmm1, %xmm1 ; AVX1-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-NEXT: vmovdqa %xmm0, (%rsi) ; AVX1-NEXT: vmovdqa %xmm1, (%rdx) ; AVX1-NEXT: retq ; ; AVX512-LABEL: load_i16_stride2_vf8: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa (%rdi), %ymm0 ; AVX512-NEXT: vpsrld $16, %ymm0, %ymm1 ; AVX512-NEXT: vpmovdw %ymm0, (%rsi) ; AVX512-NEXT: vpmovdw %ymm1, (%rdx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <16 x i16>, ptr %in.vec, align 64 %strided.vec0 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> %strided.vec1 = shufflevector <16 x i16> %wide.vec, <16 x i16> poison, <8 x i32> store <8 x i16> %strided.vec0, ptr %out.vec0, align 64 store <8 x i16> %strided.vec1, ptr %out.vec1, align 64 ret void } define void @load_i16_stride2_vf16(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind { ; SSE-LABEL: load_i16_stride2_vf16: ; SSE: # %bb.0: ; SSE-NEXT: movdqa (%rdi), %xmm0 ; SSE-NEXT: movdqa 16(%rdi), %xmm2 ; SSE-NEXT: movdqa 32(%rdi), %xmm1 ; SSE-NEXT: movdqa 48(%rdi), %xmm3 ; SSE-NEXT: movdqa %xmm3, %xmm4 ; SSE-NEXT: pslld $16, %xmm4 ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: movdqa %xmm1, %xmm5 ; SSE-NEXT: pslld $16, %xmm5 ; SSE-NEXT: psrad $16, %xmm5 ; SSE-NEXT: packssdw %xmm4, %xmm5 ; SSE-NEXT: movdqa %xmm2, %xmm4 ; SSE-NEXT: pslld $16, %xmm4 ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: movdqa %xmm0, %xmm6 ; SSE-NEXT: pslld $16, %xmm6 ; SSE-NEXT: psrad $16, %xmm6 ; SSE-NEXT: packssdw %xmm4, %xmm6 ; SSE-NEXT: psrad $16, %xmm3 ; SSE-NEXT: psrad $16, %xmm1 ; SSE-NEXT: packssdw %xmm3, %xmm1 ; SSE-NEXT: psrad $16, %xmm2 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: packssdw %xmm2, %xmm0 ; SSE-NEXT: movdqa %xmm6, (%rsi) ; SSE-NEXT: movdqa %xmm5, 16(%rsi) ; SSE-NEXT: movdqa %xmm0, (%rdx) ; SSE-NEXT: movdqa %xmm1, 16(%rdx) ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: load_i16_stride2_vf16: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm3 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm4 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm4[0],xmm0[1],xmm4[2],xmm0[3],xmm4[4],xmm0[5],xmm4[6],xmm0[7] ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4],xmm0[5],xmm3[6],xmm0[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm5, %xmm6, %xmm5 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7] ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm6, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm5, 16(%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 16(%rdx) ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: load_i16_stride2_vf16: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm3 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: 
vpshufhw {{.*#+}} ymm3 = ymm3[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm3[0,2],ymm2[0,2],ymm3[4,6],ymm2[4,6] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-SLOW-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-SLOW-NEXT: vmovaps %ymm2, (%rsi) ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: load_i16_stride2_vf16: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u] ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm1, %ymm3 ; AVX2-FAST-NEXT: vpshufb %ymm2, %ymm0, %ymm2 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-FAST-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-FAST-NEXT: vmovaps %ymm2, (%rsi) ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: load_i16_stride2_vf16: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm2 = [0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm1, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm2, %ymm0, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm2[0,2],ymm3[0,2],ymm2[4,6],ymm3[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-FAST-PERLANE-NEXT: vpshufb {{.*#+}} ymm0 = ymm0[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, (%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rdx) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512-LABEL: load_i16_stride2_vf16: ; AVX512: # %bb.0: ; AVX512-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512-NEXT: vpsrld $16, %zmm0, %zmm1 ; AVX512-NEXT: vpmovdw %zmm0, (%rsi) ; AVX512-NEXT: vpmovdw %zmm1, (%rdx) ; AVX512-NEXT: vzeroupper ; AVX512-NEXT: retq %wide.vec = load <32 x i16>, ptr %in.vec, align 64 %strided.vec0 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> %strided.vec1 = shufflevector <32 x i16> %wide.vec, <32 x i16> poison, <16 x i32> store <16 x i16> %strided.vec0, ptr %out.vec0, align 64 store <16 x i16> %strided.vec1, ptr %out.vec1, align 64 ret void } define void @load_i16_stride2_vf32(ptr %in.vec, ptr %out.vec0, ptr 
%out.vec1) nounwind { ; SSE-LABEL: load_i16_stride2_vf32: ; SSE: # %bb.0: ; SSE-NEXT: movdqa 64(%rdi), %xmm0 ; SSE-NEXT: movdqa 80(%rdi), %xmm4 ; SSE-NEXT: movdqa 96(%rdi), %xmm1 ; SSE-NEXT: movdqa 112(%rdi), %xmm6 ; SSE-NEXT: movdqa (%rdi), %xmm2 ; SSE-NEXT: movdqa 16(%rdi), %xmm7 ; SSE-NEXT: movdqa 32(%rdi), %xmm3 ; SSE-NEXT: movdqa 48(%rdi), %xmm9 ; SSE-NEXT: movdqa %xmm9, %xmm8 ; SSE-NEXT: pslld $16, %xmm8 ; SSE-NEXT: psrad $16, %xmm8 ; SSE-NEXT: movdqa %xmm3, %xmm5 ; SSE-NEXT: pslld $16, %xmm5 ; SSE-NEXT: psrad $16, %xmm5 ; SSE-NEXT: packssdw %xmm8, %xmm5 ; SSE-NEXT: movdqa %xmm7, %xmm10 ; SSE-NEXT: pslld $16, %xmm10 ; SSE-NEXT: psrad $16, %xmm10 ; SSE-NEXT: movdqa %xmm2, %xmm8 ; SSE-NEXT: pslld $16, %xmm8 ; SSE-NEXT: psrad $16, %xmm8 ; SSE-NEXT: packssdw %xmm10, %xmm8 ; SSE-NEXT: movdqa %xmm6, %xmm11 ; SSE-NEXT: pslld $16, %xmm11 ; SSE-NEXT: psrad $16, %xmm11 ; SSE-NEXT: movdqa %xmm1, %xmm10 ; SSE-NEXT: pslld $16, %xmm10 ; SSE-NEXT: psrad $16, %xmm10 ; SSE-NEXT: packssdw %xmm11, %xmm10 ; SSE-NEXT: movdqa %xmm4, %xmm11 ; SSE-NEXT: pslld $16, %xmm11 ; SSE-NEXT: psrad $16, %xmm11 ; SSE-NEXT: movdqa %xmm0, %xmm12 ; SSE-NEXT: pslld $16, %xmm12 ; SSE-NEXT: psrad $16, %xmm12 ; SSE-NEXT: packssdw %xmm11, %xmm12 ; SSE-NEXT: psrad $16, %xmm9 ; SSE-NEXT: psrad $16, %xmm3 ; SSE-NEXT: packssdw %xmm9, %xmm3 ; SSE-NEXT: psrad $16, %xmm7 ; SSE-NEXT: psrad $16, %xmm2 ; SSE-NEXT: packssdw %xmm7, %xmm2 ; SSE-NEXT: psrad $16, %xmm6 ; SSE-NEXT: psrad $16, %xmm1 ; SSE-NEXT: packssdw %xmm6, %xmm1 ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: packssdw %xmm4, %xmm0 ; SSE-NEXT: movdqa %xmm12, 32(%rsi) ; SSE-NEXT: movdqa %xmm10, 48(%rsi) ; SSE-NEXT: movdqa %xmm8, (%rsi) ; SSE-NEXT: movdqa %xmm5, 16(%rsi) ; SSE-NEXT: movdqa %xmm0, 32(%rdx) ; SSE-NEXT: movdqa %xmm1, 48(%rdx) ; SSE-NEXT: movdqa %xmm2, (%rdx) ; SSE-NEXT: movdqa %xmm3, 16(%rdx) ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: load_i16_stride2_vf32: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: vpxor %xmm0, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm1 ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm2 ; AVX1-ONLY-NEXT: vmovdqa 32(%rdi), %xmm3 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm4 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm5 = xmm4[0],xmm0[1],xmm4[2],xmm0[3],xmm4[4],xmm0[5],xmm4[6],xmm0[7] ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm3[0],xmm0[1],xmm3[2],xmm0[3],xmm3[4],xmm0[5],xmm3[6],xmm0[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm5, %xmm6, %xmm5 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm6 = xmm2[0],xmm0[1],xmm2[2],xmm0[3],xmm2[4],xmm0[5],xmm2[6],xmm0[7] ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm7 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm6, %xmm7, %xmm6 ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm7 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm7[0],xmm0[1],xmm7[2],xmm0[3],xmm7[4],xmm0[5],xmm7[6],xmm0[7] ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm9 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm10 = xmm9[0],xmm0[1],xmm9[2],xmm0[3],xmm9[4],xmm0[5],xmm9[6],xmm0[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm8, %xmm10, %xmm8 ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm10 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm10[0],xmm0[1],xmm10[2],xmm0[3],xmm10[4],xmm0[5],xmm10[6],xmm0[7] ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm12 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm12[0],xmm0[1],xmm12[2],xmm0[3],xmm12[4],xmm0[5],xmm12[6],xmm0[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm11, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: 
vpsrld $16, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm1, %xmm1 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm7, %xmm2 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm9, %xmm4 ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm4, %xmm2 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm10, %xmm4 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm12, %xmm7 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm7, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 32(%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm8, 48(%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm6, (%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm5, 16(%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm4, 32(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 48(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm1, (%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 16(%rdx) ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: load_i16_stride2_vf32: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm2 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm3 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm4 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm4 = ymm4[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm2[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm4 = ymm5[0,2],ymm4[0,2],ymm5[4,6],ymm4[4,6] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm5 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm5 = ymm5[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm6 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm6 = ymm6[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm3, %ymm3 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm2, %ymm2 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-SLOW-NEXT: vpshufb %ymm6, %ymm1, %ymm1 ; AVX2-SLOW-NEXT: vpshufb %ymm7, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-SLOW-NEXT: vmovaps %ymm5, (%rsi) ; AVX2-SLOW-NEXT: vmovaps %ymm4, 32(%rsi) ; AVX2-SLOW-NEXT: vmovdqa %ymm0, (%rdx) ; AVX2-SLOW-NEXT: vmovdqa %ymm2, 32(%rdx) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: load_i16_stride2_vf32: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm2 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u] ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm3, %ymm5 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm2, %ymm6 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm1, %ymm6 ; AVX2-FAST-NEXT: vpshufb %ymm4, %ymm0, %ymm4 ; AVX2-FAST-NEXT: 
vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm3, %ymm3 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm2, %ymm2 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm6, %ymm1, %ymm1 ; AVX2-FAST-NEXT: vpshufb %ymm7, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-FAST-NEXT: vmovaps %ymm4, (%rsi) ; AVX2-FAST-NEXT: vmovaps %ymm5, 32(%rsi) ; AVX2-FAST-NEXT: vmovdqa %ymm0, (%rdx) ; AVX2-FAST-NEXT: vmovdqa %ymm2, 32(%rdx) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: load_i16_stride2_vf32: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm2 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm4 = [0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm3, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm2, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm5 = ymm6[0,2],ymm5[0,2],ymm6[4,6],ymm5[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm5 = ymm5[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm1, %ymm6 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm4, %ymm0, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm4 = ymm4[0,2],ymm6[0,2],ymm4[4,6],ymm6[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm4 = ymm4[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm6 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm3, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm7 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm2, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm2 = ymm2[0,1],ymm3[2,3],ymm2[4,5],ymm3[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm6, %ymm1, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm7, %ymm0, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm4, (%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm5, 32(%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, (%rdx) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm2, 32(%rdx) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512F-LABEL: load_i16_stride2_vf32: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1 ; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm2 ; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm3 ; AVX512F-NEXT: vpmovdw %zmm1, 32(%rsi) ; AVX512F-NEXT: vpmovdw %zmm0, (%rsi) ; AVX512F-NEXT: vpmovdw %zmm3, 32(%rdx) ; AVX512F-NEXT: vpmovdw %zmm2, (%rdx) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: load_i16_stride2_vf32: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 ; 
AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm2 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62] ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm2 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63] ; AVX512BW-NEXT: vpermi2w %zmm1, %zmm0, %zmm3 ; AVX512BW-NEXT: vmovdqa64 %zmm2, (%rsi) ; AVX512BW-NEXT: vmovdqa64 %zmm3, (%rdx) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %wide.vec = load <64 x i16>, ptr %in.vec, align 64 %strided.vec0 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> %strided.vec1 = shufflevector <64 x i16> %wide.vec, <64 x i16> poison, <32 x i32> store <32 x i16> %strided.vec0, ptr %out.vec0, align 64 store <32 x i16> %strided.vec1, ptr %out.vec1, align 64 ret void } define void @load_i16_stride2_vf64(ptr %in.vec, ptr %out.vec0, ptr %out.vec1) nounwind { ; SSE-LABEL: load_i16_stride2_vf64: ; SSE: # %bb.0: ; SSE-NEXT: subq $40, %rsp ; SSE-NEXT: movdqa 160(%rdi), %xmm14 ; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 176(%rdi), %xmm2 ; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 64(%rdi), %xmm11 ; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 80(%rdi), %xmm1 ; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 96(%rdi), %xmm9 ; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 112(%rdi), %xmm4 ; SSE-NEXT: movdqa %xmm4, (%rsp) # 16-byte Spill ; SSE-NEXT: movdqa (%rdi), %xmm10 ; SSE-NEXT: movdqa %xmm10, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 16(%rdi), %xmm7 ; SSE-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 32(%rdi), %xmm13 ; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa 48(%rdi), %xmm0 ; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: pslld $16, %xmm0 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: pslld $16, %xmm13 ; SSE-NEXT: psrad $16, %xmm13 ; SSE-NEXT: packssdw %xmm0, %xmm13 ; SSE-NEXT: movdqa %xmm7, %xmm0 ; SSE-NEXT: pslld $16, %xmm0 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: pslld $16, %xmm10 ; SSE-NEXT: psrad $16, %xmm10 ; SSE-NEXT: packssdw %xmm0, %xmm10 ; SSE-NEXT: movdqa %xmm4, %xmm0 ; SSE-NEXT: pslld $16, %xmm0 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: pslld $16, %xmm9 ; SSE-NEXT: psrad $16, %xmm9 ; SSE-NEXT: packssdw %xmm0, %xmm9 ; SSE-NEXT: movdqa %xmm1, %xmm0 ; SSE-NEXT: pslld $16, %xmm0 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: pslld $16, %xmm11 ; SSE-NEXT: psrad $16, %xmm11 ; SSE-NEXT: packssdw %xmm0, %xmm11 ; SSE-NEXT: movdqa %xmm2, %xmm0 ; SSE-NEXT: pslld $16, %xmm0 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: pslld $16, %xmm14 ; SSE-NEXT: psrad $16, %xmm14 ; SSE-NEXT: packssdw %xmm0, %xmm14 ; SSE-NEXT: movdqa 144(%rdi), %xmm7 ; SSE-NEXT: movdqa %xmm7, %xmm0 ; SSE-NEXT: pslld $16, %xmm0 ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: movdqa 128(%rdi), %xmm8 ; SSE-NEXT: movdqa %xmm8, %xmm15 ; SSE-NEXT: pslld $16, %xmm15 ; SSE-NEXT: psrad $16, %xmm15 ; SSE-NEXT: packssdw %xmm0, %xmm15 ; SSE-NEXT: movdqa 240(%rdi), %xmm12 ; SSE-NEXT: movdqa %xmm12, %xmm1 ; SSE-NEXT: pslld $16, %xmm1 ; SSE-NEXT: psrad $16, %xmm1 ; SSE-NEXT: movdqa 224(%rdi), %xmm5 ; SSE-NEXT: movdqa %xmm5, %xmm3 ; SSE-NEXT: pslld $16, %xmm3 ; SSE-NEXT: psrad $16, %xmm3 ; SSE-NEXT: packssdw %xmm1, %xmm3 ; SSE-NEXT: movdqa 208(%rdi), 
%xmm6 ; SSE-NEXT: movdqa %xmm6, %xmm4 ; SSE-NEXT: pslld $16, %xmm4 ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: movdqa 192(%rdi), %xmm2 ; SSE-NEXT: movdqa %xmm2, %xmm1 ; SSE-NEXT: pslld $16, %xmm1 ; SSE-NEXT: psrad $16, %xmm1 ; SSE-NEXT: packssdw %xmm4, %xmm1 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: packssdw %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: packssdw %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: packssdw %xmm0, %xmm4 ; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; SSE-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm4 ; SSE-NEXT: packssdw %xmm0, %xmm4 ; SSE-NEXT: psrad $16, %xmm7 ; SSE-NEXT: psrad $16, %xmm8 ; SSE-NEXT: packssdw %xmm7, %xmm8 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm0 ; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; SSE-NEXT: psrad $16, %xmm7 ; SSE-NEXT: packssdw %xmm0, %xmm7 ; SSE-NEXT: psrad $16, %xmm6 ; SSE-NEXT: psrad $16, %xmm2 ; SSE-NEXT: packssdw %xmm6, %xmm2 ; SSE-NEXT: psrad $16, %xmm12 ; SSE-NEXT: psrad $16, %xmm5 ; SSE-NEXT: packssdw %xmm12, %xmm5 ; SSE-NEXT: movdqa %xmm1, 96(%rsi) ; SSE-NEXT: movdqa %xmm3, 112(%rsi) ; SSE-NEXT: movdqa %xmm15, 64(%rsi) ; SSE-NEXT: movdqa %xmm14, 80(%rsi) ; SSE-NEXT: movdqa %xmm11, 32(%rsi) ; SSE-NEXT: movdqa %xmm9, 48(%rsi) ; SSE-NEXT: movdqa %xmm10, (%rsi) ; SSE-NEXT: movdqa %xmm13, 16(%rsi) ; SSE-NEXT: movdqa %xmm5, 112(%rdx) ; SSE-NEXT: movdqa %xmm2, 96(%rdx) ; SSE-NEXT: movdqa %xmm7, 80(%rdx) ; SSE-NEXT: movdqa %xmm8, 64(%rdx) ; SSE-NEXT: movdqa %xmm4, 48(%rdx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 32(%rdx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, 16(%rdx) ; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; SSE-NEXT: movaps %xmm0, (%rdx) ; SSE-NEXT: addq $40, %rsp ; SSE-NEXT: retq ; ; AVX1-ONLY-LABEL: load_i16_stride2_vf64: ; AVX1-ONLY: # %bb.0: ; AVX1-ONLY-NEXT: subq $24, %rsp ; AVX1-ONLY-NEXT: vpxor %xmm11, %xmm11, %xmm11 ; AVX1-ONLY-NEXT: vmovdqa 176(%rdi), %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0],xmm11[1],xmm0[2],xmm11[3],xmm0[4],xmm11[5],xmm0[6],xmm11[7] ; AVX1-ONLY-NEXT: vmovdqa 160(%rdi), %xmm9 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm9[0],xmm11[1],xmm9[2],xmm11[3],xmm9[4],xmm11[5],xmm9[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm0, %xmm1, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa (%rdi), %xmm6 ; AVX1-ONLY-NEXT: vmovdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 16(%rdi), %xmm8 ; AVX1-ONLY-NEXT: vmovdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: 
vmovdqa 32(%rdi), %xmm7 ; AVX1-ONLY-NEXT: vmovdqa 48(%rdi), %xmm10 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm0 = xmm10[0],xmm11[1],xmm10[2],xmm11[3],xmm10[4],xmm11[5],xmm10[6],xmm11[7] ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm7[0],xmm11[1],xmm7[2],xmm11[3],xmm7[4],xmm11[5],xmm7[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm0, %xmm1, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 144(%rdi), %xmm5 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm5[0],xmm11[1],xmm5[2],xmm11[3],xmm5[4],xmm11[5],xmm5[6],xmm11[7] ; AVX1-ONLY-NEXT: vmovdqa 128(%rdi), %xmm4 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm4[0],xmm11[1],xmm4[2],xmm11[3],xmm4[4],xmm11[5],xmm4[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm3, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm8[0],xmm11[1],xmm8[2],xmm11[3],xmm8[4],xmm11[5],xmm8[6],xmm11[7] ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm11[1],xmm6[2],xmm11[3],xmm6[4],xmm11[5],xmm6[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm3, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 112(%rdi), %xmm12 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm2 = xmm12[0],xmm11[1],xmm12[2],xmm11[3],xmm12[4],xmm11[5],xmm12[6],xmm11[7] ; AVX1-ONLY-NEXT: vmovdqa 96(%rdi), %xmm13 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm13[0],xmm11[1],xmm13[2],xmm11[3],xmm13[4],xmm11[5],xmm13[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm2, %xmm8, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 80(%rdi), %xmm15 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm14 = xmm15[0],xmm11[1],xmm15[2],xmm11[3],xmm15[4],xmm11[5],xmm15[6],xmm11[7] ; AVX1-ONLY-NEXT: vmovdqa 64(%rdi), %xmm6 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm3 = xmm6[0],xmm11[1],xmm6[2],xmm11[3],xmm6[4],xmm11[5],xmm6[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm14, %xmm3, %xmm0 ; AVX1-ONLY-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill ; AVX1-ONLY-NEXT: vmovdqa 240(%rdi), %xmm3 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm3[0],xmm11[1],xmm3[2],xmm11[3],xmm3[4],xmm11[5],xmm3[6],xmm11[7] ; AVX1-ONLY-NEXT: vmovdqa 224(%rdi), %xmm0 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm8 = xmm0[0],xmm11[1],xmm0[2],xmm11[3],xmm0[4],xmm11[5],xmm0[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm1, %xmm8, %xmm14 ; AVX1-ONLY-NEXT: vmovdqa 208(%rdi), %xmm8 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm1 = xmm8[0],xmm11[1],xmm8[2],xmm11[3],xmm8[4],xmm11[5],xmm8[6],xmm11[7] ; AVX1-ONLY-NEXT: vmovdqa 192(%rdi), %xmm2 ; AVX1-ONLY-NEXT: vpblendw {{.*#+}} xmm11 = xmm2[0],xmm11[1],xmm2[2],xmm11[3],xmm2[4],xmm11[5],xmm2[6],xmm11[7] ; AVX1-ONLY-NEXT: vpackusdw %xmm1, %xmm11, %xmm1 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm3, %xmm3 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpackusdw %xmm3, %xmm0, %xmm11 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload ; AVX1-ONLY-NEXT: vpsrld $16, %xmm0, %xmm0 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm9, %xmm3 ; AVX1-ONLY-NEXT: vpackusdw %xmm0, %xmm3, %xmm0 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm3 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm4, %xmm4 ; AVX1-ONLY-NEXT: vpackusdw %xmm3, %xmm4, %xmm3 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm10, %xmm4 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm7, %xmm5 ; AVX1-ONLY-NEXT: vpackusdw %xmm4, %xmm5, %xmm4 ; AVX1-ONLY-NEXT: vmovdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload ; AVX1-ONLY-NEXT: vpsrld $16, %xmm5, %xmm5 ; AVX1-ONLY-NEXT: vmovdqa 
{{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload ; AVX1-ONLY-NEXT: vpsrld $16, %xmm7, %xmm7 ; AVX1-ONLY-NEXT: vpackusdw %xmm5, %xmm7, %xmm5 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm8, %xmm7 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpackusdw %xmm7, %xmm2, %xmm2 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm12, %xmm7 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm13, %xmm8 ; AVX1-ONLY-NEXT: vpackusdw %xmm7, %xmm8, %xmm7 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm15, %xmm8 ; AVX1-ONLY-NEXT: vpsrld $16, %xmm6, %xmm6 ; AVX1-ONLY-NEXT: vpackusdw %xmm8, %xmm6, %xmm6 ; AVX1-ONLY-NEXT: vmovdqa %xmm1, 96(%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm14, 112(%rsi) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm1, 32(%rsi) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm1, 48(%rsi) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm1, (%rsi) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm1, 64(%rsi) ; AVX1-ONLY-NEXT: vmovaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm1, 16(%rsi) ; AVX1-ONLY-NEXT: vmovaps (%rsp), %xmm1 # 16-byte Reload ; AVX1-ONLY-NEXT: vmovaps %xmm1, 80(%rsi) ; AVX1-ONLY-NEXT: vmovdqa %xmm6, 32(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm7, 48(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm2, 96(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm5, (%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm4, 16(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm3, 64(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm0, 80(%rdx) ; AVX1-ONLY-NEXT: vmovdqa %xmm11, 112(%rdx) ; AVX1-ONLY-NEXT: addq $24, %rsp ; AVX1-ONLY-NEXT: retq ; ; AVX2-SLOW-LABEL: load_i16_stride2_vf64: ; AVX2-SLOW: # %bb.0: ; AVX2-SLOW-NEXT: vmovdqa 128(%rdi), %ymm0 ; AVX2-SLOW-NEXT: vmovdqa 160(%rdi), %ymm1 ; AVX2-SLOW-NEXT: vmovdqa 192(%rdi), %ymm4 ; AVX2-SLOW-NEXT: vmovdqa 224(%rdi), %ymm6 ; AVX2-SLOW-NEXT: vmovdqa (%rdi), %ymm3 ; AVX2-SLOW-NEXT: vmovdqa 32(%rdi), %ymm5 ; AVX2-SLOW-NEXT: vmovdqa 64(%rdi), %ymm8 ; AVX2-SLOW-NEXT: vmovdqa 96(%rdi), %ymm9 ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm2 = ymm9[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm2 = ymm2[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm7 = ymm8[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm2 = ymm7[0,2],ymm2[0,2],ymm7[4,6],ymm2[4,6] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm7 = ymm6[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm7 = ymm7[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm10 = ymm4[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm7 = ymm10[0,2],ymm7[0,2],ymm10[4,6],ymm7[4,6] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm7 = ymm7[0,2,1,3] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm10 = ymm5[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm10 = ymm10[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm11 = ymm3[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,2],ymm10[0,2],ymm11[4,6],ymm10[4,6] ; AVX2-SLOW-NEXT: 
vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm11 = ymm1[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm11 = ymm11[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vpshuflw {{.*#+}} ymm12 = ymm0[0,2,2,3,4,5,6,7,8,10,10,11,12,13,14,15] ; AVX2-SLOW-NEXT: vpshufhw {{.*#+}} ymm12 = ymm12[0,1,2,3,4,6,6,7,8,9,10,11,12,14,14,15] ; AVX2-SLOW-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6] ; AVX2-SLOW-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3] ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm9, %ymm9 ; AVX2-SLOW-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm8, %ymm8 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm8 = ymm8[0,1],ymm9[2,3],ymm8[4,5],ymm9[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,1,3] ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm6, %ymm6 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm4, %ymm4 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm4 = ymm4[0,1],ymm6[2,3],ymm4[4,5],ymm6[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3] ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm5, %ymm5 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm3, %ymm3 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm5[2,3],ymm3[4,5],ymm5[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3] ; AVX2-SLOW-NEXT: vpshufb %ymm12, %ymm1, %ymm1 ; AVX2-SLOW-NEXT: vpshufb %ymm13, %ymm0, %ymm0 ; AVX2-SLOW-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-SLOW-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-SLOW-NEXT: vmovaps %ymm11, 64(%rsi) ; AVX2-SLOW-NEXT: vmovaps %ymm10, (%rsi) ; AVX2-SLOW-NEXT: vmovaps %ymm7, 96(%rsi) ; AVX2-SLOW-NEXT: vmovaps %ymm2, 32(%rsi) ; AVX2-SLOW-NEXT: vmovdqa %ymm0, 64(%rdx) ; AVX2-SLOW-NEXT: vmovdqa %ymm3, (%rdx) ; AVX2-SLOW-NEXT: vmovdqa %ymm4, 96(%rdx) ; AVX2-SLOW-NEXT: vmovdqa %ymm8, 32(%rdx) ; AVX2-SLOW-NEXT: vzeroupper ; AVX2-SLOW-NEXT: retq ; ; AVX2-FAST-LABEL: load_i16_stride2_vf64: ; AVX2-FAST: # %bb.0: ; AVX2-FAST-NEXT: vmovdqa 128(%rdi), %ymm0 ; AVX2-FAST-NEXT: vmovdqa 160(%rdi), %ymm1 ; AVX2-FAST-NEXT: vmovdqa 192(%rdi), %ymm3 ; AVX2-FAST-NEXT: vmovdqa 224(%rdi), %ymm4 ; AVX2-FAST-NEXT: vmovdqa (%rdi), %ymm5 ; AVX2-FAST-NEXT: vmovdqa 32(%rdi), %ymm6 ; AVX2-FAST-NEXT: vmovdqa 64(%rdi), %ymm7 ; AVX2-FAST-NEXT: vmovdqa 96(%rdi), %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u] ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm8, %ymm2 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm7, %ymm10 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,2],ymm2[0,2],ymm10[4,6],ymm2[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm4, %ymm10 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm3, %ymm11 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,2],ymm10[0,2],ymm11[4,6],ymm10[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm6, %ymm11 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm5, %ymm12 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm1, %ymm12 ; AVX2-FAST-NEXT: vpshufb %ymm9, %ymm0, %ymm9 ; AVX2-FAST-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,2],ymm12[0,2],ymm9[4,6],ymm12[4,6] ; AVX2-FAST-NEXT: vpermpd {{.*#+}} ymm9 = 
ymm9[0,2,1,3] ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm8, %ymm8 ; AVX2-FAST-NEXT: vmovdqa {{.*#+}} ymm13 = [2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm7, %ymm7 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm4, %ymm4 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm3, %ymm3 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm6, %ymm4 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm5, %ymm5 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3] ; AVX2-FAST-NEXT: vpshufb %ymm12, %ymm1, %ymm1 ; AVX2-FAST-NEXT: vpshufb %ymm13, %ymm0, %ymm0 ; AVX2-FAST-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-FAST-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-FAST-NEXT: vmovaps %ymm9, 64(%rsi) ; AVX2-FAST-NEXT: vmovaps %ymm11, (%rsi) ; AVX2-FAST-NEXT: vmovaps %ymm10, 96(%rsi) ; AVX2-FAST-NEXT: vmovaps %ymm2, 32(%rsi) ; AVX2-FAST-NEXT: vmovdqa %ymm0, 64(%rdx) ; AVX2-FAST-NEXT: vmovdqa %ymm4, (%rdx) ; AVX2-FAST-NEXT: vmovdqa %ymm3, 96(%rdx) ; AVX2-FAST-NEXT: vmovdqa %ymm7, 32(%rdx) ; AVX2-FAST-NEXT: vzeroupper ; AVX2-FAST-NEXT: retq ; ; AVX2-FAST-PERLANE-LABEL: load_i16_stride2_vf64: ; AVX2-FAST-PERLANE: # %bb.0: ; AVX2-FAST-PERLANE-NEXT: vmovdqa 128(%rdi), %ymm0 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 160(%rdi), %ymm1 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 192(%rdi), %ymm3 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 224(%rdi), %ymm4 ; AVX2-FAST-PERLANE-NEXT: vmovdqa (%rdi), %ymm5 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 32(%rdi), %ymm6 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 64(%rdi), %ymm7 ; AVX2-FAST-PERLANE-NEXT: vmovdqa 96(%rdi), %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm9 = [0,1,4,5,u,u,u,u,8,9,12,13,u,u,u,u,16,17,20,21,u,u,u,u,24,25,28,29,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm8, %ymm2 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm7, %ymm10 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm2 = ymm10[0,2],ymm2[0,2],ymm10[4,6],ymm2[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm2 = ymm2[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm4, %ymm10 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm3, %ymm11 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm10 = ymm11[0,2],ymm10[0,2],ymm11[4,6],ymm10[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm10 = ymm10[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm6, %ymm11 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm5, %ymm12 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm11 = ymm12[0,2],ymm11[0,2],ymm12[4,6],ymm11[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm11 = ymm11[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm1, %ymm12 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm9, %ymm0, %ymm9 ; AVX2-FAST-PERLANE-NEXT: vshufps {{.*#+}} ymm9 = ymm9[0,2],ymm12[0,2],ymm9[4,6],ymm12[4,6] ; AVX2-FAST-PERLANE-NEXT: vpermpd {{.*#+}} ymm9 = ymm9[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm12 = [u,u,u,u,u,u,u,u,2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm8, %ymm8 ; AVX2-FAST-PERLANE-NEXT: vmovdqa {{.*#+}} ymm13 = 
[2,3,6,7,10,11,14,15,u,u,u,u,u,u,u,u,18,19,22,23,26,27,30,31,u,u,u,u,u,u,u,u] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm7, %ymm7 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm7 = ymm7[0,1],ymm8[2,3],ymm7[4,5],ymm8[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm7 = ymm7[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm4, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm3, %ymm3 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm3 = ymm3[0,1],ymm4[2,3],ymm3[4,5],ymm4[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm6, %ymm4 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm5, %ymm5 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm4 = ymm5[0,1],ymm4[2,3],ymm5[4,5],ymm4[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm4 = ymm4[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm12, %ymm1, %ymm1 ; AVX2-FAST-PERLANE-NEXT: vpshufb %ymm13, %ymm0, %ymm0 ; AVX2-FAST-PERLANE-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7] ; AVX2-FAST-PERLANE-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3] ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm9, 64(%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm11, (%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm10, 96(%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovaps %ymm2, 32(%rsi) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm0, 64(%rdx) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm4, (%rdx) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm3, 96(%rdx) ; AVX2-FAST-PERLANE-NEXT: vmovdqa %ymm7, 32(%rdx) ; AVX2-FAST-PERLANE-NEXT: vzeroupper ; AVX2-FAST-PERLANE-NEXT: retq ; ; AVX512F-LABEL: load_i16_stride2_vf64: ; AVX512F: # %bb.0: ; AVX512F-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512F-NEXT: vmovdqa64 64(%rdi), %zmm1 ; AVX512F-NEXT: vmovdqa64 128(%rdi), %zmm2 ; AVX512F-NEXT: vmovdqa64 192(%rdi), %zmm3 ; AVX512F-NEXT: vpmovdw %zmm1, %ymm4 ; AVX512F-NEXT: vpsrld $16, %zmm1, %zmm1 ; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm5 ; AVX512F-NEXT: vpsrld $16, %zmm3, %zmm6 ; AVX512F-NEXT: vpsrld $16, %zmm2, %zmm7 ; AVX512F-NEXT: vpmovdw %zmm0, (%rsi) ; AVX512F-NEXT: vmovdqa %ymm4, 32(%rsi) ; AVX512F-NEXT: vpmovdw %zmm2, 64(%rsi) ; AVX512F-NEXT: vpmovdw %zmm3, 96(%rsi) ; AVX512F-NEXT: vpmovdw %zmm7, 64(%rdx) ; AVX512F-NEXT: vpmovdw %zmm6, 96(%rdx) ; AVX512F-NEXT: vpmovdw %zmm5, (%rdx) ; AVX512F-NEXT: vpmovdw %zmm1, 32(%rdx) ; AVX512F-NEXT: vzeroupper ; AVX512F-NEXT: retq ; ; AVX512BW-LABEL: load_i16_stride2_vf64: ; AVX512BW: # %bb.0: ; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0 ; AVX512BW-NEXT: vmovdqa64 64(%rdi), %zmm1 ; AVX512BW-NEXT: vmovdqa64 128(%rdi), %zmm2 ; AVX512BW-NEXT: vmovdqa64 192(%rdi), %zmm3 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [0,2,4,6,8,10,12,14,16,18,20,22,24,26,28,30,32,34,36,38,40,42,44,46,48,50,52,54,56,58,60,62] ; AVX512BW-NEXT: vmovdqa64 %zmm0, %zmm5 ; AVX512BW-NEXT: vpermt2w %zmm1, %zmm4, %zmm5 ; AVX512BW-NEXT: vpermi2w %zmm3, %zmm2, %zmm4 ; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm6 = [1,3,5,7,9,11,13,15,17,19,21,23,25,27,29,31,33,35,37,39,41,43,45,47,49,51,53,55,57,59,61,63] ; AVX512BW-NEXT: vpermt2w %zmm1, %zmm6, %zmm0 ; AVX512BW-NEXT: vpermt2w %zmm3, %zmm6, %zmm2 ; AVX512BW-NEXT: vmovdqa64 %zmm4, 64(%rsi) ; AVX512BW-NEXT: vmovdqa64 %zmm5, (%rsi) ; AVX512BW-NEXT: vmovdqa64 %zmm2, 64(%rdx) ; AVX512BW-NEXT: vmovdqa64 %zmm0, (%rdx) ; AVX512BW-NEXT: vzeroupper ; AVX512BW-NEXT: retq %wide.vec = load <128 x i16>, ptr %in.vec, align 64 %strided.vec0 = shufflevector <128 x i16> %wide.vec, <128 x i16> poison, <64 x i32> %strided.vec1 = shufflevector <128 x i16> %wide.vec, <128 x i16> poison, <64 x i32> store <64 x i16> %strided.vec0, ptr 
%out.vec0, align 64
  store <64 x i16> %strided.vec1, ptr %out.vec1, align 64
  ret void
}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX2: {{.*}}
; AVX2-ONLY: {{.*}}
; AVX512-FAST: {{.*}}
; AVX512-SLOW: {{.*}}
; AVX512BW-FAST: {{.*}}
; AVX512BW-ONLY: {{.*}}
; AVX512BW-ONLY-FAST: {{.*}}
; AVX512BW-ONLY-SLOW: {{.*}}
; AVX512BW-SLOW: {{.*}}
; AVX512DQ-FAST: {{.*}}
; AVX512DQ-ONLY: {{.*}}
; AVX512DQ-SLOW: {{.*}}
; AVX512DQBW-FAST: {{.*}}
; AVX512DQBW-ONLY: {{.*}}
; AVX512DQBW-SLOW: {{.*}}
; AVX512F-FAST: {{.*}}
; AVX512F-ONLY: {{.*}}
; AVX512F-ONLY-FAST: {{.*}}
; AVX512F-ONLY-SLOW: {{.*}}
; AVX512F-SLOW: {{.*}}
; FALLBACK0: {{.*}}
; FALLBACK1: {{.*}}
; FALLBACK10: {{.*}}
; FALLBACK11: {{.*}}
; FALLBACK12: {{.*}}
; FALLBACK2: {{.*}}
; FALLBACK3: {{.*}}
; FALLBACK4: {{.*}}
; FALLBACK5: {{.*}}
; FALLBACK6: {{.*}}
; FALLBACK7: {{.*}}
; FALLBACK8: {{.*}}
; FALLBACK9: {{.*}}