; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-crosslane-shuffle,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-ALL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw,+avx512vl,+fast-variable-perlane-shuffle | FileCheck %s --check-prefixes=AVX512,AVX512BWVL,AVX512BWVL-FAST-PERLANE

; Strided loads with a non-zero starting offset: extract every 2nd/4th/8th
; element of a 512-bit vector starting at element 1..7.
; Masks below are reconstructed from the checked codegen (e.g. vpsrlq $8 +
; vpmovqb selects bytes 1,9,17,... = stride 8, offset 1).

define void @shuffle_v64i8_to_v32i8_1(ptr %L, ptr %S) nounwind {
; AVX512F-LABEL: shuffle_v64i8_to_v32i8_1:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512F-NEXT:    vmovdqa 32(%rdi), %ymm1
; AVX512F-NEXT:    vpshufb {{.*#+}} ymm1 = ymm1[u,u,u,u,u,u,u,u,1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31]
; AVX512F-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[1,3,5,7,9,11,13,15,u,u,u,u,u,u,u,u,17,19,21,23,25,27,29,31,u,u,u,u,u,u,u,u]
; AVX512F-NEXT:    vpblendd {{.*#+}} ymm0 = ymm0[0,1],ymm1[2,3],ymm0[4,5],ymm1[6,7]
; AVX512F-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512F-NEXT:    vmovdqa %ymm0, (%rsi)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512BW-LABEL: shuffle_v64i8_to_v32i8_1:
; AVX512BW:       # %bb.0:
; AVX512BW-NEXT:    vpsrlw $8, (%rdi), %zmm0
; AVX512BW-NEXT:    vpmovwb %zmm0, (%rsi)
; AVX512BW-NEXT:    vzeroupper
; AVX512BW-NEXT:    retq
;
; AVX512BWVL-LABEL: shuffle_v64i8_to_v32i8_1:
; AVX512BWVL:       # %bb.0:
; AVX512BWVL-NEXT:    vpsrlw $8, (%rdi), %zmm0
; AVX512BWVL-NEXT:    vpmovwb %zmm0, (%rsi)
; AVX512BWVL-NEXT:    vzeroupper
; AVX512BWVL-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <32 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31, i32 33, i32 35, i32 37, i32 39, i32 41, i32 43, i32 45, i32 47, i32 49, i32 51, i32 53, i32 55, i32 57, i32 59, i32 61, i32 63>
  store <32 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v32i16_to_v16i16_1(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v32i16_to_v16i16_1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrld $16, (%rdi), %zmm0
; AVX512-NEXT:    vpmovdw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, ptr %L
  %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
  store <16 x i16> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v16i32_to_v8i32_1(ptr %L, ptr %S) nounwind {
; AVX512F-LABEL: shuffle_v16i32_to_v8i32_1:
; AVX512F:       # %bb.0:
; AVX512F-NEXT:    vmovaps (%rdi), %ymm0
; AVX512F-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],mem[1,3],ymm0[5,7],mem[5,7]
; AVX512F-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512F-NEXT:    vmovaps %ymm0, (%rsi)
; AVX512F-NEXT:    vzeroupper
; AVX512F-NEXT:    retq
;
; AVX512BWVL-FAST-ALL-LABEL: shuffle_v16i32_to_v8i32_1:
; AVX512BWVL-FAST-ALL:       # %bb.0:
; AVX512BWVL-FAST-ALL-NEXT:    vmovdqa (%rdi), %ymm0
; AVX512BWVL-FAST-ALL-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,3,5,7,9,11,13,15]
; AVX512BWVL-FAST-ALL-NEXT:    vpermi2d 32(%rdi), %ymm0, %ymm1
; AVX512BWVL-FAST-ALL-NEXT:    vmovdqa %ymm1, (%rsi)
; AVX512BWVL-FAST-ALL-NEXT:    vzeroupper
; AVX512BWVL-FAST-ALL-NEXT:    retq
;
; AVX512BWVL-FAST-PERLANE-LABEL: shuffle_v16i32_to_v8i32_1:
; AVX512BWVL-FAST-PERLANE:       # %bb.0:
; AVX512BWVL-FAST-PERLANE-NEXT:    vmovaps (%rdi), %ymm0
; AVX512BWVL-FAST-PERLANE-NEXT:    vshufps {{.*#+}} ymm0 = ymm0[1,3],mem[1,3],ymm0[5,7],mem[5,7]
; AVX512BWVL-FAST-PERLANE-NEXT:    vpermpd {{.*#+}} ymm0 = ymm0[0,2,1,3]
; AVX512BWVL-FAST-PERLANE-NEXT:    vmovaps %ymm0, (%rsi)
; AVX512BWVL-FAST-PERLANE-NEXT:    vzeroupper
; AVX512BWVL-FAST-PERLANE-NEXT:    retq
  %vec = load <16 x i32>, ptr %L
  %strided.vec = shufflevector <16 x i32> %vec, <16 x i32> undef, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
  store <8 x i32> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v16i8_1(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v16i8_1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrld $8, (%rdi), %zmm0
; AVX512-NEXT:    vpmovdb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29, i32 33, i32 37, i32 41, i32 45, i32 49, i32 53, i32 57, i32 61>
  store <16 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v16i8_2(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v16i8_2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrld $16, (%rdi), %zmm0
; AVX512-NEXT:    vpmovdb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30, i32 34, i32 38, i32 42, i32 46, i32 50, i32 54, i32 58, i32 62>
  store <16 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v16i8_3(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v16i8_3:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrld $24, (%rdi), %zmm0
; AVX512-NEXT:    vpmovdb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <16 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31, i32 35, i32 39, i32 43, i32 47, i32 51, i32 55, i32 59, i32 63>
  store <16 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v32i16_to_v8i16_1(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v32i16_to_v8i16_1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $16, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, ptr %L
  %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 1, i32 5, i32 9, i32 13, i32 17, i32 21, i32 25, i32 29>
  store <8 x i16> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v32i16_to_v8i16_2(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v32i16_to_v8i16_2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $32, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, ptr %L
  %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 2, i32 6, i32 10, i32 14, i32 18, i32 22, i32 26, i32 30>
  store <8 x i16> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v32i16_to_v8i16_3(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v32i16_to_v8i16_3:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $48, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqw %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <32 x i16>, ptr %L
  %strided.vec = shufflevector <32 x i16> %vec, <32 x i16> undef, <8 x i32> <i32 3, i32 7, i32 11, i32 15, i32 19, i32 23, i32 27, i32 31>
  store <8 x i16> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_1(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $8, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 1, i32 9, i32 17, i32 25, i32 33, i32 41, i32 49, i32 57>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_2(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $16, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 2, i32 10, i32 18, i32 26, i32 34, i32 42, i32 50, i32 58>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_3(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_3:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $24, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 3, i32 11, i32 19, i32 27, i32 35, i32 43, i32 51, i32 59>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_4(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_4:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $32, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 4, i32 12, i32 20, i32 28, i32 36, i32 44, i32 52, i32 60>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_5(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_5:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $40, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 5, i32 13, i32 21, i32 29, i32 37, i32 45, i32 53, i32 61>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_6(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_6:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $48, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 6, i32 14, i32 22, i32 30, i32 38, i32 46, i32 54, i32 62>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

define void @shuffle_v64i8_to_v8i8_7(ptr %L, ptr %S) nounwind {
; AVX512-LABEL: shuffle_v64i8_to_v8i8_7:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpsrlq $56, (%rdi), %zmm0
; AVX512-NEXT:    vpmovqb %zmm0, (%rsi)
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %vec = load <64 x i8>, ptr %L
  %strided.vec = shufflevector <64 x i8> %vec, <64 x i8> undef, <8 x i32> <i32 7, i32 15, i32 23, i32 31, i32 39, i32 47, i32 55, i32 63>
  store <8 x i8> %strided.vec, ptr %S
  ret void
}

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
; AVX512VL: {{.*}}