; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-linux | FileCheck %s --check-prefix=X64
; RUN: llc < %s -mtriple=x86_64-linux -mattr=+avx2 | FileCheck %s --check-prefix=X64-AVX2
; RUN: llc < %s -mtriple=i686 -mattr=cmov | FileCheck %s --check-prefix=X86

declare <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64>, <2 x i64>)
declare <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32>, <4 x i32>)
declare <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16>, <8 x i16>)
declare <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8>, <16 x i8>)
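
; ushl.sat is an unsigned saturating left shift: each lane yields x << y,
; except that if any set bit would be shifted out ((x << y) >> y != x) the
; lane saturates to all-ones. Every lowering checked below has the same
; verify-by-reversing shape: shift left, shift the result back right,
; compare with the original lane, and force mismatching lanes to -1.
; For v2i64, plain SSE has no per-lane variable shift, so each half is
; shifted with its own psllq/psrlq and the halves recombined; AVX2 uses
; vpsllvq/vpsrlvq directly.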
define <2 x i64> @vec_v2i64(<2 x i64> %x, <2 x i64> %y) nounwind {
; X64-LABEL: vec_v2i64:
; X64:       # %bb.0:
; X64-NEXT:    movdqa %xmm0, %xmm2
; X64-NEXT:    psllq %xmm1, %xmm2
; X64-NEXT:    pshufd {{.*#+}} xmm3 = xmm1[2,3,2,3]
; X64-NEXT:    movdqa %xmm0, %xmm4
; X64-NEXT:    psllq %xmm3, %xmm4
; X64-NEXT:    movdqa %xmm4, %xmm5
; X64-NEXT:    movsd {{.*#+}} xmm4 = xmm2[0],xmm4[1]
; X64-NEXT:    psrlq %xmm1, %xmm2
; X64-NEXT:    psrlq %xmm3, %xmm5
; X64-NEXT:    movsd {{.*#+}} xmm5 = xmm2[0],xmm5[1]
; X64-NEXT:    pcmpeqd %xmm5, %xmm0
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[1,0,3,2]
; X64-NEXT:    pand %xmm1, %xmm0
; X64-NEXT:    pcmpeqd %xmm1, %xmm1
; X64-NEXT:    pxor %xmm1, %xmm0
; X64-NEXT:    por %xmm4, %xmm0
; X64-NEXT:    retq
;
; X64-AVX2-LABEL: vec_v2i64:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpsllvq %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT:    vpsrlvq %xmm1, %xmm2, %xmm1
; X64-AVX2-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT:    vblendvpd %xmm0, %xmm2, %xmm1, %xmm0
; X64-AVX2-NEXT:    retq
;
; X86-LABEL: vec_v2i64:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $16, %esp
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    shll %cl, %eax
; X86-NEXT:    shldl %cl, %esi, %edx
; X86-NEXT:    xorl %ebx, %ebx
; X86-NEXT:    testb $32, %cl
; X86-NEXT:    cmovnel %eax, %edx
; X86-NEXT:    cmovnel %ebx, %eax
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    movl %edx, %eax
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    movl %edx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    shrl %cl, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    testb $32, %cl
; X86-NEXT:    cmovnel %ebx, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %esi
; X86-NEXT:    movb %ch, %cl
; X86-NEXT:    shll %cl, %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    shldl %cl, %eax, %edx
; X86-NEXT:    testb $32, %ch
; X86-NEXT:    cmovnel %esi, %edx
; X86-NEXT:    cmovnel %ebx, %esi
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    shrl %cl, %edi
; X86-NEXT:    testb $32, %ch
; X86-NEXT:    cmovel %edi, %ebx
; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    shrdl %cl, %ebp, %eax
; X86-NEXT:    testb $32, %cl
; X86-NEXT:    cmovnel {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Folded Reload
; X86-NEXT:    movl %esi, %ebp
; X86-NEXT:    movb %ch, %cl
; X86-NEXT:    shrdl %cl, %edx, %ebp
; X86-NEXT:    testb $32, %ch
; X86-NEXT:    cmovnel %edi, %ebp
; X86-NEXT:    xorl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    orl %eax, %ecx
; X86-NEXT:    movl $-1, %ecx
; X86-NEXT:    movl (%esp), %edi # 4-byte Reload
; X86-NEXT:    cmovnel %ecx, %edi
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    cmovnel %ecx, %eax
; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    xorl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    orl %ebp, %ebx
; X86-NEXT:    cmovnel %ecx, %esi
; X86-NEXT:    cmovnel %ecx, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %edx, 12(%ecx)
; X86-NEXT:    movl %esi, 8(%ecx)
; X86-NEXT:    movl %eax, 4(%ecx)
; X86-NEXT:    movl %edi, (%ecx)
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    addl $16, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <2 x i64> @llvm.ushl.sat.v2i64(<2 x i64> %x, <2 x i64> %y)
  ret <2 x i64> %tmp
}
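
; v4i32: without variable vector shifts, SSE builds 2^y per lane with the
; float-exponent trick (pslld $23, paddd the exponent bias, cvttps2dq) and
; multiplies via pmuludq; the reverse shift uses per-element psrld. AVX2
; gets vpsllvd/vpsrlvd, and i686 checks each lane with scalar shifts.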
define <4 x i32> @vec_v4i32(<4 x i32> %x, <4 x i32> %y) nounwind {
; X64-LABEL: vec_v4i32:
; X64:       # %bb.0:
; X64-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; X64-NEXT:    pshuflw {{.*#+}} xmm3 = xmm1[2,3,3,3,4,5,6,7]
; X64-NEXT:    pshuflw {{.*#+}} xmm4 = xmm1[0,1,1,1,4,5,6,7]
; X64-NEXT:    pslld $23, %xmm1
; X64-NEXT:    paddd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    cvttps2dq %xmm1, %xmm1
; X64-NEXT:    movdqa %xmm0, %xmm5
; X64-NEXT:    pmuludq %xmm1, %xmm5
; X64-NEXT:    pshufd {{.*#+}} xmm6 = xmm5[0,2,2,3]
; X64-NEXT:    pshufd {{.*#+}} xmm7 = xmm0[1,1,3,3]
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; X64-NEXT:    pmuludq %xmm7, %xmm1
; X64-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; X64-NEXT:    punpckldq {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1]
; X64-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[2,3,3,3,4,5,6,7]
; X64-NEXT:    movdqa %xmm6, %xmm7
; X64-NEXT:    psrld %xmm1, %xmm7
; X64-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; X64-NEXT:    movdqa %xmm5, %xmm2
; X64-NEXT:    psrld %xmm1, %xmm2
; X64-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm7[1]
; X64-NEXT:    movdqa %xmm6, %xmm1
; X64-NEXT:    psrld %xmm3, %xmm1
; X64-NEXT:    psrld %xmm4, %xmm5
; X64-NEXT:    punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm1[0]
; X64-NEXT:    shufps {{.*#+}} xmm5 = xmm5[0,3],xmm2[0,3]
; X64-NEXT:    pcmpeqd %xmm5, %xmm0
; X64-NEXT:    pcmpeqd %xmm1, %xmm1
; X64-NEXT:    pxor %xmm1, %xmm0
; X64-NEXT:    por %xmm6, %xmm0
; X64-NEXT:    retq
;
; X64-AVX2-LABEL: vec_v4i32:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpsllvd %xmm1, %xmm0, %xmm2
; X64-AVX2-NEXT:    vpsrlvd %xmm1, %xmm2, %xmm1
; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT:    vblendvps %xmm0, %xmm2, %xmm1, %xmm0
; X64-AVX2-NEXT:    retq
;
; X86-LABEL: vec_v4i32:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ah
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movl %ebx, %esi
; X86-NEXT:    shll %cl, %esi
; X86-NEXT:    movl %esi, %ebp
; X86-NEXT:    shrl %cl, %ebp
; X86-NEXT:    cmpl %ebp, %ebx
; X86-NEXT:    movl $-1, %edx
; X86-NEXT:    cmovnel %edx, %esi
; X86-NEXT:    movl $-1, %ebx
; X86-NEXT:    movl %edi, %edx
; X86-NEXT:    movb %ah, %cl
; X86-NEXT:    shll %cl, %edx
; X86-NEXT:    movl %edx, %ebp
; X86-NEXT:    shrl %cl, %ebp
; X86-NEXT:    cmpl %ebp, %edi
; X86-NEXT:    cmovnel %ebx, %edx
; X86-NEXT:    movl $-1, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edi
; X86-NEXT:    movb %ch, %cl
; X86-NEXT:    shll %cl, %edi
; X86-NEXT:    movl %edi, %ebp
; X86-NEXT:    shrl %cl, %ebp
; X86-NEXT:    cmpl %ebp, {{[0-9]+}}(%esp)
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    cmovnel %eax, %edi
; X86-NEXT:    movl %ebx, %ebp
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    shll %cl, %ebp
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    shrl %cl, %eax
; X86-NEXT:    cmpl %eax, %ebx
; X86-NEXT:    movl $-1, %eax
; X86-NEXT:    cmovnel %eax, %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %ebp, 12(%eax)
; X86-NEXT:    movl %edi, 8(%eax)
; X86-NEXT:    movl %edx, 4(%eax)
; X86-NEXT:    movl %esi, (%eax)
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <4 x i32> @llvm.ushl.sat.v4i32(<4 x i32> %x, <4 x i32> %y)
  ret <4 x i32> %tmp
}
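
; v8i16: the shift amounts are widened to i32 halves so the same
; float-exponent trick can build 2^y, which pmullw applies; the reverse
; logical shift is a psraw-mask blend ladder keyed off psllw $12.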
define <8 x i16> @vec_v8i16(<8 x i16> %x, <8 x i16> %y) nounwind {
; X64-LABEL: vec_v8i16:
; X64:       # %bb.0:
; X64-NEXT:    movdqa %xmm1, %xmm2
; X64-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; X64-NEXT:    pslld $23, %xmm2
; X64-NEXT:    movdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
; X64-NEXT:    paddd %xmm3, %xmm2
; X64-NEXT:    cvttps2dq %xmm2, %xmm4
; X64-NEXT:    pslld $16, %xmm4
; X64-NEXT:    psrad $16, %xmm4
; X64-NEXT:    movdqa %xmm1, %xmm2
; X64-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3]
; X64-NEXT:    pslld $23, %xmm2
; X64-NEXT:    paddd %xmm3, %xmm2
; X64-NEXT:    cvttps2dq %xmm2, %xmm2
; X64-NEXT:    pslld $16, %xmm2
; X64-NEXT:    psrad $16, %xmm2
; X64-NEXT:    packssdw %xmm4, %xmm2
; X64-NEXT:    pmullw %xmm0, %xmm2
; X64-NEXT:    psllw $12, %xmm1
; X64-NEXT:    movdqa %xmm1, %xmm3
; X64-NEXT:    psraw $15, %xmm3
; X64-NEXT:    movdqa %xmm2, %xmm4
; X64-NEXT:    psrlw $8, %xmm4
; X64-NEXT:    pand %xmm3, %xmm4
; X64-NEXT:    pandn %xmm2, %xmm3
; X64-NEXT:    por %xmm4, %xmm3
; X64-NEXT:    paddw %xmm1, %xmm1
; X64-NEXT:    movdqa %xmm1, %xmm4
; X64-NEXT:    psraw $15, %xmm4
; X64-NEXT:    movdqa %xmm4, %xmm5
; X64-NEXT:    pandn %xmm3, %xmm5
; X64-NEXT:    psrlw $4, %xmm3
; X64-NEXT:    pand %xmm4, %xmm3
; X64-NEXT:    por %xmm5, %xmm3
; X64-NEXT:    paddw %xmm1, %xmm1
; X64-NEXT:    movdqa %xmm1, %xmm4
; X64-NEXT:    psraw $15, %xmm4
; X64-NEXT:    movdqa %xmm4, %xmm5
; X64-NEXT:    pandn %xmm3, %xmm5
; X64-NEXT:    psrlw $2, %xmm3
; X64-NEXT:    pand %xmm4, %xmm3
; X64-NEXT:    por %xmm5, %xmm3
; X64-NEXT:    paddw %xmm1, %xmm1
; X64-NEXT:    psraw $15, %xmm1
; X64-NEXT:    movdqa %xmm1, %xmm4
; X64-NEXT:    pandn %xmm3, %xmm4
; X64-NEXT:    psrlw $1, %xmm3
; X64-NEXT:    pand %xmm1, %xmm3
; X64-NEXT:    por %xmm4, %xmm3
; X64-NEXT:    pcmpeqw %xmm3, %xmm0
; X64-NEXT:    pcmpeqd %xmm1, %xmm1
; X64-NEXT:    pxor %xmm1, %xmm0
; X64-NEXT:    por %xmm2, %xmm0
; X64-NEXT:    retq
;
; X64-AVX2-LABEL: vec_v8i16:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; X64-AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; X64-AVX2-NEXT:    vpsllvd %ymm1, %ymm2, %ymm2
; X64-AVX2-NEXT:    vpshufb {{.*#+}} ymm2 = ymm2[0,1,4,5,8,9,12,13,u,u,u,u,u,u,u,u,16,17,20,21,24,25,28,29,u,u,u,u,u,u,u,u]
; X64-AVX2-NEXT:    vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3]
; X64-AVX2-NEXT:    vpmovzxwd {{.*#+}} ymm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero
; X64-AVX2-NEXT:    vpsrlvd %ymm1, %ymm3, %ymm1
; X64-AVX2-NEXT:    vextracti128 $1, %ymm1, %xmm3
; X64-AVX2-NEXT:    vpackusdw %xmm3, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpcmpeqw %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpor %xmm2, %xmm0, %xmm0
; X64-AVX2-NEXT:    vzeroupper
; X64-AVX2-NEXT:    retq
;
; X86-LABEL: vec_v8i16:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ebp
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %eax, %ebx
; X86-NEXT:    shll %cl, %ebx
; X86-NEXT:    movzwl %bx, %edi
; X86-NEXT:    shrl %cl, %edi
; X86-NEXT:    cmpw %di, %ax
; X86-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X86-NEXT:    cmovnel %eax, %ebx
; X86-NEXT:    movl %ebx, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %esi, %eax
; X86-NEXT:    movl %edx, %ecx
; X86-NEXT:    shll %cl, %eax
; X86-NEXT:    movzwl %ax, %edi
; X86-NEXT:    shrl %cl, %edi
; X86-NEXT:    cmpw %di, %si
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl $65535, %esi # imm = 0xFFFF
; X86-NEXT:    cmovnel %esi, %eax
; X86-NEXT:    movl %eax, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %ebp, %eax
; X86-NEXT:    shll %cl, %eax
; X86-NEXT:    movzwl %ax, %edx
; X86-NEXT:    shrl %cl, %edx
; X86-NEXT:    cmpw %dx, %bp
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmovnel %esi, %eax
; X86-NEXT:    movl %eax, (%esp) # 4-byte Spill
; X86-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X86-NEXT:    movl {{[0-9]+}}(%esp), %esi
; X86-NEXT:    movl %esi, %ebp
; X86-NEXT:    shll %cl, %ebp
; X86-NEXT:    movzwl %bp, %edx
; X86-NEXT:    shrl %cl, %edx
; X86-NEXT:    cmpw %dx, %si
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmovnel %eax, %ebp
; X86-NEXT:    movl %edx, %ebx
; X86-NEXT:    shll %cl, %ebx
; X86-NEXT:    movzwl %bx, %esi
; X86-NEXT:    shrl %cl, %esi
; X86-NEXT:    cmpw %si, %dx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl $65535, %esi # imm = 0xFFFF
; X86-NEXT:    cmovnel %esi, %ebx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl %edx, %edi
; X86-NEXT:    shll %cl, %edi
; X86-NEXT:    movzwl %di, %eax
; X86-NEXT:    shrl %cl, %eax
; X86-NEXT:    cmpw %ax, %dx
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movl %edx, %esi
; X86-NEXT:    shll %cl, %esi
; X86-NEXT:    movzwl %si, %eax
; X86-NEXT:    shrl %cl, %eax
; X86-NEXT:    cmpw %ax, %dx
; X86-NEXT:    movl $65535, %eax # imm = 0xFFFF
; X86-NEXT:    cmovnel %eax, %esi
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    shll %cl, %eax
; X86-NEXT:    movzwl %ax, %edx
; X86-NEXT:    shrl %cl, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    cmpw %dx, %cx
; X86-NEXT:    movl $65535, %ecx # imm = 0xFFFF
; X86-NEXT:    cmovnel %ecx, %eax
; X86-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movw %ax, 14(%ecx)
; X86-NEXT:    movw %si, 12(%ecx)
; X86-NEXT:    movw %di, 10(%ecx)
; X86-NEXT:    movw %bx, 8(%ecx)
; X86-NEXT:    movw %bp, 6(%ecx)
; X86-NEXT:    movl (%esp), %eax # 4-byte Reload
; X86-NEXT:    movw %ax, 4(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    movw %ax, 2(%ecx)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %eax # 4-byte Reload
; X86-NEXT:    movw %ax, (%ecx)
; X86-NEXT:    movl %ecx, %eax
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <8 x i16> @llvm.ushl.sat.v8i16(<8 x i16> %x, <8 x i16> %y)
  ret <8 x i16> %tmp
}
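
; v16i8: there is no byte-granular shift, so both directions are bit-by-bit
; blend ladders: psllw $5 moves each shift-amount bit into a byte's sign
; bit, and sign-bit selects (pcmpgtb/pand/pandn, or vpblendvb on AVX2) pick
; the partially shifted value at each step.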
define <16 x i8> @vec_v16i8(<16 x i8> %x, <16 x i8> %y) nounwind {
; X64-LABEL: vec_v16i8:
; X64:       # %bb.0:
; X64-NEXT:    psllw $5, %xmm1
; X64-NEXT:    pxor %xmm3, %xmm3
; X64-NEXT:    pxor %xmm4, %xmm4
; X64-NEXT:    pcmpgtb %xmm1, %xmm4
; X64-NEXT:    movdqa %xmm4, %xmm5
; X64-NEXT:    pandn %xmm0, %xmm5
; X64-NEXT:    movdqa %xmm0, %xmm2
; X64-NEXT:    psllw $4, %xmm2
; X64-NEXT:    pand %xmm4, %xmm2
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; X64-NEXT:    por %xmm5, %xmm2
; X64-NEXT:    paddb %xmm1, %xmm1
; X64-NEXT:    pxor %xmm5, %xmm5
; X64-NEXT:    pcmpgtb %xmm1, %xmm5
; X64-NEXT:    movdqa %xmm5, %xmm6
; X64-NEXT:    pandn %xmm2, %xmm6
; X64-NEXT:    psllw $2, %xmm2
; X64-NEXT:    pand %xmm5, %xmm2
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
; X64-NEXT:    por %xmm6, %xmm2
; X64-NEXT:    paddb %xmm1, %xmm1
; X64-NEXT:    pcmpgtb %xmm1, %xmm3
; X64-NEXT:    movdqa %xmm3, %xmm1
; X64-NEXT:    pandn %xmm2, %xmm1
; X64-NEXT:    paddb %xmm2, %xmm2
; X64-NEXT:    pand %xmm3, %xmm2
; X64-NEXT:    por %xmm1, %xmm2
; X64-NEXT:    movdqa %xmm2, %xmm1
; X64-NEXT:    psrlw $4, %xmm1
; X64-NEXT:    pand %xmm4, %xmm1
; X64-NEXT:    pandn %xmm2, %xmm4
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    por %xmm4, %xmm1
; X64-NEXT:    movdqa %xmm5, %xmm4
; X64-NEXT:    pandn %xmm1, %xmm4
; X64-NEXT:    psrlw $2, %xmm1
; X64-NEXT:    pand %xmm5, %xmm1
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    por %xmm4, %xmm1
; X64-NEXT:    movdqa %xmm3, %xmm4
; X64-NEXT:    pandn %xmm1, %xmm4
; X64-NEXT:    psrlw $1, %xmm1
; X64-NEXT:    pand %xmm3, %xmm1
; X64-NEXT:    pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; X64-NEXT:    por %xmm4, %xmm1
; X64-NEXT:    pcmpeqb %xmm1, %xmm0
; X64-NEXT:    pcmpeqd %xmm1, %xmm1
; X64-NEXT:    pxor %xmm1, %xmm0
; X64-NEXT:    por %xmm2, %xmm0
; X64-NEXT:    retq
;
; X64-AVX2-LABEL: vec_v16i8:
; X64-AVX2:       # %bb.0:
; X64-AVX2-NEXT:    vpsllw $5, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpsllw $4, %xmm0, %xmm2
; X64-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; X64-AVX2-NEXT:    vpblendvb %xmm1, %xmm2, %xmm0, %xmm2
; X64-AVX2-NEXT:    vpsllw $2, %xmm2, %xmm3
; X64-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; X64-AVX2-NEXT:    vpaddb %xmm1, %xmm1, %xmm4
; X64-AVX2-NEXT:    vpblendvb %xmm4, %xmm3, %xmm2, %xmm2
; X64-AVX2-NEXT:    vpaddb %xmm2, %xmm2, %xmm3
; X64-AVX2-NEXT:    vpaddb %xmm4, %xmm4, %xmm5
; X64-AVX2-NEXT:    vpblendvb %xmm5, %xmm3, %xmm2, %xmm2
; X64-AVX2-NEXT:    vpsrlw $4, %xmm2, %xmm3
; X64-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; X64-AVX2-NEXT:    vpblendvb %xmm1, %xmm3, %xmm2, %xmm1
; X64-AVX2-NEXT:    vpsrlw $2, %xmm1, %xmm3
; X64-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; X64-AVX2-NEXT:    vpblendvb %xmm4, %xmm3, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpsrlw $1, %xmm1, %xmm3
; X64-AVX2-NEXT:    vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm3, %xmm3
; X64-AVX2-NEXT:    vpblendvb %xmm5, %xmm3, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; X64-AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; X64-AVX2-NEXT:    vpblendvb %xmm0, %xmm2, %xmm1, %xmm0
; X64-AVX2-NEXT:    retq
;
; X86-LABEL: vec_v16i8:
; X86:       # %bb.0:
; X86-NEXT:    pushl %ebp
; X86-NEXT:    pushl %ebx
; X86-NEXT:    pushl %edi
; X86-NEXT:    pushl %esi
; X86-NEXT:    subl $48, %esp
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ch
; X86-NEXT:    movb {{[0-9]+}}(%esp), %ah
; X86-NEXT:    movb {{[0-9]+}}(%esp), %dh
; X86-NEXT:    movb {{[0-9]+}}(%esp), %cl
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ebx
; X86-NEXT:    movb %bl, %bh
; X86-NEXT:    shlb %cl, %bh
; X86-NEXT:    movzbl %bh, %edi
; X86-NEXT:    shrb %cl, %bh
; X86-NEXT:    cmpb %bh, %bl
; X86-NEXT:    movl $255, %esi
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movb %dh, %bl
; X86-NEXT:    movb %ah, %cl
; X86-NEXT:    shlb %cl, %bl
; X86-NEXT:    movzbl %bl, %edi
; X86-NEXT:    shrb %cl, %bl
; X86-NEXT:    cmpb %bl, %dh
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movb %ch, %ah
; X86-NEXT:    movb %dl, %cl
; X86-NEXT:    shlb %cl, %ah
; X86-NEXT:    movzbl %ah, %edi
; X86-NEXT:    shrb %cl, %ah
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %edx
; X86-NEXT:    cmpb %ah, %ch
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movb %dl, %ah
; X86-NEXT:    movl %eax, %ecx
; X86-NEXT:    shlb %cl, %ah
; X86-NEXT:    movzbl %ah, %edi
; X86-NEXT:    shrb %cl, %ah
; X86-NEXT:    cmpb %ah, %dl
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movl %edi, (%esp) # 4-byte Spill
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %ebp
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %ebp
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %edi
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %edi
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movl %eax, %edx
; X86-NEXT:    shlb %cl, %dl
; X86-NEXT:    movzbl %dl, %ebx
; X86-NEXT:    shrb %cl, %dl
; X86-NEXT:    cmpb %dl, %al
; X86-NEXT:    cmovnel %esi, %ebx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT:    movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb %al, %ah
; X86-NEXT:    shlb %cl, %ah
; X86-NEXT:    movzbl %ah, %edx
; X86-NEXT:    shrb %cl, %ah
; X86-NEXT:    cmpb %ah, %al
; X86-NEXT:    cmovnel %esi, %edx
; X86-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X86-NEXT:    movb %dl, 15(%eax)
; X86-NEXT:    movb %bl, 14(%eax)
; X86-NEXT:    movl %edi, %ecx
; X86-NEXT:    movb %cl, 13(%eax)
; X86-NEXT:    movl %ebp, %ecx
; X86-NEXT:    movb %cl, 12(%eax)
; X86-NEXT:    movl (%esp), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 11(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 10(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 9(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 8(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 7(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 6(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 5(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 4(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 3(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 2(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, 1(%eax)
; X86-NEXT:    movl {{[-0-9]+}}(%e{{[sb]}}p), %ecx # 4-byte Reload
; X86-NEXT:    movb %cl, (%eax)
; X86-NEXT:    addl $48, %esp
; X86-NEXT:    popl %esi
; X86-NEXT:    popl %edi
; X86-NEXT:    popl %ebx
; X86-NEXT:    popl %ebp
; X86-NEXT:    retl $4
  %tmp = call <16 x i8> @llvm.ushl.sat.v16i8(<16 x i8> %x, <16 x i8> %y)
  ret <16 x i8> %tmp
}