; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mattr=+avx -mtriple=i686-pc-win32 | FileCheck %s --check-prefixes=AVX1
; RUN: llc < %s -mattr=+avx2 -mtriple=i686-pc-win32 | FileCheck %s --check-prefixes=AVX2

define void @endless_loop() {
; AVX1-LABEL: endless_loop:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    vmovaps (%eax), %xmm0
; AVX1-NEXT:    vshufps {{.*#+}} xmm1 = xmm0[0,0,0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3,4,5,6],ymm1[7]
; AVX1-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX1-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm2[1,2,3]
; AVX1-NEXT:    vmovaps %ymm0, (%eax)
; AVX1-NEXT:    vmovaps %ymm1, (%eax)
; AVX1-NEXT:    vzeroupper
; AVX1-NEXT:    retl
;
; AVX2-LABEL: endless_loop:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    vbroadcastss (%eax), %xmm0
; AVX2-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vblendps {{.*#+}} xmm1 = xmm0[0],xmm1[1,2,3]
; AVX2-NEXT:    vbroadcastss %xmm0, %ymm0
; AVX2-NEXT:    vxorps %xmm2, %xmm2, %xmm2
; AVX2-NEXT:    vblendps {{.*#+}} ymm0 = ymm2[0,1,2,3,4,5,6],ymm0[7]
; AVX2-NEXT:    vmovaps %ymm0, (%eax)
; AVX2-NEXT:    vmovaps %ymm1, (%eax)
; AVX2-NEXT:    vzeroupper
; AVX2-NEXT:    retl
entry:
  %0 = load <8 x i32>, ptr addrspace(1) undef, align 32
  %1 = shufflevector <8 x i32> %0, <8 x i32> undef, <16 x i32>
  %2 = shufflevector <16 x i32> , <16 x i32> %1, <16 x i32>
  store <16 x i32> %2, ptr addrspace(1) undef, align 64
  ret void
}