; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-- -mattr=+sse2 | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-- -mattr=+avx512f,+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512

declare void @use_v8i1(<8 x i1>)
declare void @use_v8i8(<8 x i8>)

define <8 x i16> @cmp_ne_load_const(ptr %x) nounwind {
; SSE-LABEL: cmp_ne_load_const:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_ne_load_const:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %loadx = load <8 x i8>, ptr %x
  %icmp = icmp ne <8 x i8> %loadx, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

; negative test - simple loads only

define <8 x i16> @cmp_ne_load_const_volatile(ptr %x) nounwind {
; SSE-LABEL: cmp_ne_load_const_volatile:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ne_load_const_volatile:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ne_load_const_volatile:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %loadx = load volatile <8 x i8>, ptr %x
  %icmp = icmp ne <8 x i8> %loadx, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

; negative test - don't create extra load

define <8 x i16> @cmp_ne_load_const_extra_use1(ptr %x) nounwind {
; SSE-LABEL: cmp_ne_load_const_extra_use1:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $24, %rsp
; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT:    callq use_v8i8@PLT
; SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    addq $24, %rsp
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ne_load_const_extra_use1:
; AVX2:       # %bb.0:
; AVX2-NEXT:    subq $24, %rsp
; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT:    callq use_v8i8@PLT
; AVX2-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqb (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    addq $24, %rsp
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ne_load_const_extra_use1:
; AVX512:       # %bb.0:
; AVX512-NEXT:    subq $24, %rsp
; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX512-NEXT:    callq use_v8i8@PLT
; AVX512-NEXT:    vpxor %xmm0, %xmm0, %xmm0
; AVX512-NEXT:    vpcmpeqb (%rsp), %xmm0, %xmm0 # 16-byte Folded Reload
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT:    addq $24, %rsp
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %loadx = load <8 x i8>, ptr %x
  call void @use_v8i8(<8 x i8> %loadx)
  %icmp = icmp ne <8 x i8> %loadx, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

; negative test - don't create extra compare

define <8 x i16> @cmp_ne_load_const_extra_use2(ptr %x) nounwind {
; SSE-LABEL: cmp_ne_load_const_extra_use2:
; SSE:       # %bb.0:
; SSE-NEXT:    subq $24, %rsp
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    movdqa %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    callq use_v8i1@PLT
; SSE-NEXT:    movdqa (%rsp), %xmm0 # 16-byte Reload
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    addq $24, %rsp
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ne_load_const_extra_use2:
; AVX2:       # %bb.0:
; AVX2-NEXT:    subq $24, %rsp
; AVX2-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vmovdqa %xmm0, (%rsp) # 16-byte Spill
; AVX2-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX2-NEXT:    callq use_v8i1@PLT
; AVX2-NEXT:    vpmovsxbw (%rsp), %xmm0 # 16-byte Folded Reload
; AVX2-NEXT:    addq $24, %rsp
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ne_load_const_extra_use2:
; AVX512:       # %bb.0:
; AVX512-NEXT:    subq $72, %rsp
; AVX512-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    vmovdqu64 %zmm0, (%rsp) # 64-byte Spill
; AVX512-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    callq use_v8i1@PLT
; AVX512-NEXT:    vpmovsxbw (%rsp), %xmm0 # 16-byte Folded Reload
; AVX512-NEXT:    addq $72, %rsp
; AVX512-NEXT:    retq
  %loadx = load <8 x i8>, ptr %x
  %icmp = icmp ne <8 x i8> %loadx, zeroinitializer
  call void @use_v8i1(<8 x i1> %icmp)
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

; negative test - not free extend

define <8 x i16> @cmp_ne_no_load_const(i64 %x) nounwind {
; SSE-LABEL: cmp_ne_no_load_const:
; SSE:       # %bb.0:
; SSE-NEXT:    movq %rdi, %xmm0
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ne_no_load_const:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovq %rdi, %xmm0
; AVX2-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ne_no_load_const:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovq %rdi, %xmm0
; AVX512-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    vpmovsxbw %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %t = bitcast i64 %x to <8 x i8>
  %icmp = icmp ne <8 x i8> %t, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

define <4 x i32> @cmp_ult_load_const(ptr %x) nounwind {
; SSE-LABEL: cmp_ult_load_const:
; SSE:       # %bb.0:
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [42,214,0,255,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE-NEXT:    pmaxub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_ult_load_const:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [42,214,0,255]
; AVX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %loadx = load <4 x i8>, ptr %x
  %icmp = icmp ult <4 x i8> %loadx, <i8 42, i8 -42, i8 0, i8 -1>
  %sext = sext <4 x i1> %icmp to <4 x i32>
  ret <4 x i32> %sext
}

; negative test - type must be legal

define <3 x i32> @cmp_ult_load_const_bad_type(ptr %x) nounwind {
; SSE-LABEL: cmp_ult_load_const_bad_type:
; SSE:       # %bb.0:
; SSE-NEXT:    movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    movdqa {{.*#+}} xmm1 = [42,214,0,u,u,u,u,u,u,u,u,u,u,u,u,u]
; SSE-NEXT:    pmaxub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ult_load_const_bad_type:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX2-NEXT:    vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX2-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ult_load_const_bad_type:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512-NEXT:    vpmaxub {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
; AVX512-NEXT:    vpcmpeqb %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    vpmovsxbd %xmm0, %xmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %loadx = load <3 x i8>, ptr %x
  %icmp = icmp ult <3 x i8> %loadx, <i8 42, i8 -42, i8 0>
  %sext = sext <3 x i1> %icmp to <3 x i32>
  ret <3 x i32> %sext
}

; Signed compare needs signed extend.
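; Why a zero extend would be wrong here: with the constant below, a lane
; holding -1 (0xff) satisfies "slt 42", but after zero extension the lane
; would hold 255 and fail "slt 42". Sign extension preserves the value
; (-1 stays -1), so the widened compare uses vpmovsxbd.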
define <4 x i32> @cmp_slt_load_const(ptr %x) nounwind {
; SSE-LABEL: cmp_slt_load_const:
; SSE:       # %bb.0:
; SSE-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3]
; SSE-NEXT:    movdqa {{.*#+}} xmm0 = [10794,10794,54998,54998,0,0,65535,65535]
; SSE-NEXT:    pcmpgtb %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_slt_load_const:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovsxbd (%rdi), %xmm0
; AVX-NEXT:    vmovdqa {{.*#+}} xmm1 = [42,4294967254,0,4294967295]
; AVX-NEXT:    vpcmpgtd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %loadx = load <4 x i8>, ptr %x
  %icmp = icmp slt <4 x i8> %loadx, <i8 42, i8 -42, i8 0, i8 -1>
  %sext = sext <4 x i1> %icmp to <4 x i32>
  ret <4 x i32> %sext
}

define <2 x i64> @cmp_ne_zextload(ptr %x, ptr %y) nounwind {
; SSE-LABEL: cmp_ne_zextload:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    pcmpeqd %xmm0, %xmm1
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ne_zextload:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX2-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; AVX2-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ne_zextload:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm0 = mem[0],zero,mem[1],zero
; AVX512-NEXT:    vpmovzxdq {{.*#+}} xmm1 = mem[0],zero,mem[1],zero
; AVX512-NEXT:    vpcmpeqq %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512-NEXT:    vzeroupper
; AVX512-NEXT:    retq
  %loadx = load <2 x i32>, ptr %x
  %loady = load <2 x i32>, ptr %y
  %icmp = icmp ne <2 x i32> %loadx, %loady
  %sext = sext <2 x i1> %icmp to <2 x i64>
  ret <2 x i64> %sext
}

define <8 x i16> @cmp_ugt_zextload(ptr %x, ptr %y) nounwind {
; SSE-LABEL: cmp_ugt_zextload:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    pminub %xmm0, %xmm1
; SSE-NEXT:    pcmpeqb %xmm0, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    pxor %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_ugt_zextload:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero
; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %loadx = load <8 x i8>, ptr %x
  %loady = load <8 x i8>, ptr %y
  %icmp = icmp ugt <8 x i8> %loadx, %loady
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

; Signed compare needs signed extends.
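; Same reasoning for the two-operand form: as bytes, 0x80 ugt 0x01 is true,
; but 0x80 sgt 0x01 is false (-128 > 1), so both loads must be widened with
; vpmovsxbw before the vpcmpgtw.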
define <8 x i16> @cmp_sgt_zextload(ptr %x, ptr %y) nounwind {
; SSE-LABEL: cmp_sgt_zextload:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    pcmpgtb %xmm1, %xmm0
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: cmp_sgt_zextload:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovsxbw (%rdi), %xmm0
; AVX-NEXT:    vpmovsxbw (%rsi), %xmm1
; AVX-NEXT:    vpcmpgtw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %loadx = load <8 x i8>, ptr %x
  %loady = load <8 x i8>, ptr %y
  %icmp = icmp sgt <8 x i8> %loadx, %loady
  %sext = sext <8 x i1> %icmp to <8 x i16>
  ret <8 x i16> %sext
}

; negative test - don't change a legal op
; TODO: Or should we? We can eliminate the vpmovsxwd at the cost of a 256-bit ymm vpcmpeqw.

define <8 x i32> @cmp_ne_zextload_from_legal_op(ptr %x, ptr %y) {
; SSE-LABEL: cmp_ne_zextload_from_legal_op:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa (%rdi), %xmm0
; SSE-NEXT:    pcmpeqw (%rsi), %xmm0
; SSE-NEXT:    pcmpeqd %xmm1, %xmm1
; SSE-NEXT:    pxor %xmm0, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE-NEXT:    retq
;
; AVX2-LABEL: cmp_ne_zextload_from_legal_op:
; AVX2:       # %bb.0:
; AVX2-NEXT:    vmovdqa (%rdi), %xmm0
; AVX2-NEXT:    vpcmpeqw (%rsi), %xmm0, %xmm0
; AVX2-NEXT:    vpcmpeqd %xmm1, %xmm1, %xmm1
; AVX2-NEXT:    vpxor %xmm1, %xmm0, %xmm0
; AVX2-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: cmp_ne_zextload_from_legal_op:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovdqa (%rdi), %xmm0
; AVX512-NEXT:    vpcmpeqw (%rsi), %xmm0, %xmm0
; AVX512-NEXT:    vpternlogq $15, %zmm0, %zmm0, %zmm0
; AVX512-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX512-NEXT:    retq
  %loadx = load <8 x i16>, ptr %x
  %loady = load <8 x i16>, ptr %y
  %icmp = icmp ne <8 x i16> %loadx, %loady
  %sext = sext <8 x i1> %icmp to <8 x i32>
  ret <8 x i32> %sext
}

; Both uses of the load can be absorbed by the zext-load, so we eliminate the explicit casts.
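; Since all lanes of the zero-extended value are non-negative, "x != 0" is
; equivalent to the signed "x > 0": the AVX output below does a single
; zero-extending load (vpmovzxbd) that feeds both the returned zext and the
; vpcmpgtd-against-zero that implements the ne+sext.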
define <8 x i32> @PR50055(ptr %src, ptr %dst) nounwind {
; SSE-LABEL: PR50055:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pcmpeqb %xmm3, %xmm2
; SSE-NEXT:    pcmpeqd %xmm3, %xmm3
; SSE-NEXT:    pxor %xmm2, %xmm3
; SSE-NEXT:    movdqa %xmm3, %xmm2
; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
; SSE-NEXT:    punpckhwd {{.*#+}} xmm3 = xmm3[4,4,5,5,6,6,7,7]
; SSE-NEXT:    movdqa %xmm3, 16(%rsi)
; SSE-NEXT:    movdqa %xmm2, (%rsi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR50055:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxbd {{.*#+}} ymm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm1
; AVX-NEXT:    vmovdqa %ymm1, (%rsi)
; AVX-NEXT:    retq
  %load = load <8 x i8>, ptr %src
  %zext = zext <8 x i8> %load to <8 x i32>
  %icmp = icmp ne <8 x i8> %load, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i32>
  store <8 x i32> %sext, ptr %dst
  ret <8 x i32> %zext
}

; negative test - extra uses must be absorbable by a zext-load.

define <8 x i16> @multi_use_narrower_size(ptr %src, ptr %dst) nounwind {
; SSE-LABEL: multi_use_narrower_size:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3],xmm0[4],xmm2[4],xmm0[5],xmm2[5],xmm0[6],xmm2[6],xmm0[7],xmm2[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pcmpeqb %xmm2, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm2
; SSE-NEXT:    punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE-NEXT:    movdqa %xmm1, 16(%rsi)
; SSE-NEXT:    movdqa %xmm2, (%rsi)
; SSE-NEXT:    retq
;
; AVX-LABEL: multi_use_narrower_size:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpmovzxbw {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpcmpeqb %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpmovsxbd %xmm1, %ymm1
; AVX-NEXT:    vmovdqa %ymm1, (%rsi)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %load = load <8 x i8>, ptr %src
  %zext = zext <8 x i8> %load to <8 x i16>
  %icmp = icmp eq <8 x i8> %load, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i32>
  store <8 x i32> %sext, ptr %dst
  ret <8 x i16> %zext
}

; negative test - extra uses must be absorbable by a zext-load.
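; Here the compare result (<8 x i16>) is narrower than the zext (<8 x i32>),
; so performing the compare on the zero-extended value would require an
; extra truncate; the compare stays at byte width (vpcmpeqb + vpmovsxbw)
; next to a separate vpmovzxbd for the returned zext.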
define <8 x i32> @multi_use_wider_size(ptr %src, ptr %dst) nounwind {
; SSE-LABEL: multi_use_wider_size:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1],xmm1[2],xmm3[2],xmm1[3],xmm3[3],xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3]
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm3[4],xmm1[5],xmm3[5],xmm1[6],xmm3[6],xmm1[7],xmm3[7]
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pcmpeqb %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm2, (%rsi)
; SSE-NEXT:    retq
;
; AVX-LABEL: multi_use_wider_size:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovq {{.*#+}} xmm1 = mem[0],zero
; AVX-NEXT:    vpmovzxbd {{.*#+}} ymm0 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero,xmm1[2],zero,zero,zero,xmm1[3],zero,zero,zero,xmm1[4],zero,zero,zero,xmm1[5],zero,zero,zero,xmm1[6],zero,zero,zero,xmm1[7],zero,zero,zero
; AVX-NEXT:    vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT:    vpcmpeqb %xmm2, %xmm1, %xmm1
; AVX-NEXT:    vpmovsxbw %xmm1, %xmm1
; AVX-NEXT:    vmovdqa %xmm1, (%rsi)
; AVX-NEXT:    retq
  %load = load <8 x i8>, ptr %src
  %zext = zext <8 x i8> %load to <8 x i32>
  %icmp = icmp eq <8 x i8> %load, zeroinitializer
  %sext = sext <8 x i1> %icmp to <8 x i16>
  store <8 x i16> %sext, ptr %dst
  ret <8 x i32> %zext
}

define <4 x i64> @PR50055_signed(ptr %src, ptr %dst) {
; SSE-LABEL: PR50055_signed:
; SSE:       # %bb.0:
; SSE-NEXT:    movq {{.*#+}} xmm2 = mem[0],zero
; SSE-NEXT:    punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3],xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT:    psrad $24, %xmm0
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE-NEXT:    psrad $24, %xmm1
; SSE-NEXT:    punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT:    pxor %xmm3, %xmm3
; SSE-NEXT:    pcmpgtb %xmm3, %xmm2
; SSE-NEXT:    movdqa %xmm2, %xmm3
; SSE-NEXT:    punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
; SSE-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4,4,5,5,6,6,7,7]
; SSE-NEXT:    movdqa %xmm2, 16(%rsi)
; SSE-NEXT:    movdqa %xmm3, (%rsi)
; SSE-NEXT:    retq
;
; AVX-LABEL: PR50055_signed:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovsxbd (%rdi), %ymm0
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpcmpgtd %ymm1, %ymm0, %ymm1
; AVX-NEXT:    vmovdqa %ymm1, (%rsi)
; AVX-NEXT:    retq
  %t1 = load <8 x i8>, ptr %src, align 1
  %conv = sext <8 x i8> %t1 to <8 x i32>
  %t2 = bitcast <8 x i32> %conv to <4 x i64>
  %cmp = icmp sgt <8 x i8> %t1, zeroinitializer
  %sext = sext <8 x i1> %cmp to <8 x i32>
  store <8 x i32> %sext, ptr %dst, align 32
  ret <4 x i64> %t2
}

define <8 x i32> @PR63946(<8 x i32> %a0, <8 x i32> %b0) nounwind {
; SSE-LABEL: PR63946:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movdqa %xmm1, %xmm4
; SSE-NEXT:    movdqa %xmm0, %xmm13
; SSE-NEXT:    pshufd {{.*#+}} xmm7 = xmm2[1,2,3,0]
; SSE-NEXT:    pshufd {{.*#+}} xmm9 = xmm3[1,2,3,0]
; SSE-NEXT:    pshufd {{.*#+}} xmm5 = xmm3[2,3,0,1]
; SSE-NEXT:    pshufd {{.*#+}} xmm8 = xmm2[2,3,0,1]
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm3[3,0,1,2]
; SSE-NEXT:    pshufd {{.*#+}} xmm6 = xmm2[3,0,1,2]
; SSE-NEXT:    pcmpeqd %xmm2, %xmm0
; SSE-NEXT:    movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT:    movdqa %xmm9, %xmm11
; SSE-NEXT:    pcmpeqd %xmm4, %xmm11
; SSE-NEXT:    movdqa %xmm7, %xmm12
; SSE-NEXT:    movdqa %xmm8, %xmm10
; SSE-NEXT:    movdqa %xmm5, %xmm15
; SSE-NEXT:    pcmpeqd %xmm4, %xmm15
; SSE-NEXT:    movdqa %xmm1, %xmm14
; SSE-NEXT:    pcmpeqd %xmm4, %xmm14
; SSE-NEXT:    pcmpeqd %xmm4, %xmm2
; SSE-NEXT:    pcmpeqd %xmm4, %xmm7
; SSE-NEXT:    pcmpeqd %xmm4, %xmm8
; SSE-NEXT:    movdqa %xmm6, %xmm0
; SSE-NEXT:    pcmpeqd %xmm4, %xmm6
; SSE-NEXT:    pcmpeqd %xmm3, %xmm4
; SSE-NEXT:    por %xmm4, %xmm11
; SSE-NEXT:    pcmpeqd %xmm13, %xmm12
; SSE-NEXT:    por {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Folded Reload
; SSE-NEXT:    pcmpeqd %xmm13, %xmm10
; SSE-NEXT:    pcmpeqd %xmm13, %xmm0
; SSE-NEXT:    por %xmm15, %xmm2
; SSE-NEXT:    por %xmm11, %xmm2
; SSE-NEXT:    pcmpeqd %xmm13, %xmm3
; SSE-NEXT:    por %xmm3, %xmm10
; SSE-NEXT:    por %xmm12, %xmm10
; SSE-NEXT:    por %xmm14, %xmm7
; SSE-NEXT:    pcmpeqd %xmm13, %xmm9
; SSE-NEXT:    por %xmm0, %xmm9
; SSE-NEXT:    pcmpeqd %xmm13, %xmm5
; SSE-NEXT:    por %xmm9, %xmm5
; SSE-NEXT:    por %xmm10, %xmm5
; SSE-NEXT:    por %xmm7, %xmm8
; SSE-NEXT:    por %xmm2, %xmm8
; SSE-NEXT:    packssdw %xmm8, %xmm5
; SSE-NEXT:    pcmpeqd %xmm13, %xmm1
; SSE-NEXT:    packssdw %xmm6, %xmm1
; SSE-NEXT:    por %xmm5, %xmm1
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE-NEXT:    pslld $31, %xmm0
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4,4,5,5,6,6,7,7]
; SSE-NEXT:    pslld $31, %xmm1
; SSE-NEXT:    psrad $31, %xmm1
; SSE-NEXT:    retq
;
; AVX2-LABEL: PR63946:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[1,2,3,0,5,6,7,4]
; AVX2-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[2,3,0,1,6,7,4,5]
; AVX2-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[3,0,1,2,7,4,5,6]
; AVX2-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[2,3,0,1]
; AVX2-NEXT:    vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
; AVX2-NEXT:    vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
; AVX2-NEXT:    vpermq {{.*#+}} ymm8 = ymm4[2,3,0,1]
; AVX2-NEXT:    vpcmpeqd %ymm1, %ymm0, %ymm1
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm2, %ymm2
; AVX2-NEXT:    vpor %ymm1, %ymm2, %ymm1
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm3, %ymm2
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm4, %ymm3
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm5, %ymm4
; AVX2-NEXT:    vpor %ymm4, %ymm2, %ymm2
; AVX2-NEXT:    vpor %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm6, %ymm2
; AVX2-NEXT:    vpor %ymm2, %ymm3, %ymm2
; AVX2-NEXT:    vpor %ymm2, %ymm1, %ymm1
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm7, %ymm2
; AVX2-NEXT:    vpcmpeqd %ymm0, %ymm8, %ymm0
; AVX2-NEXT:    vpor %ymm0, %ymm2, %ymm0
; AVX2-NEXT:    vpor %ymm0, %ymm1, %ymm0
; AVX2-NEXT:    vpslld $31, %ymm0, %ymm0
; AVX2-NEXT:    vpsrad $31, %ymm0, %ymm0
; AVX2-NEXT:    retq
;
; AVX512-LABEL: PR63946:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    # kill: def $ymm1 killed $ymm1 def $zmm1
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT:    vpshufd {{.*#+}} ymm2 = ymm1[1,2,3,0,5,6,7,4]
; AVX512-NEXT:    vpshufd {{.*#+}} ymm3 = ymm1[2,3,0,1,6,7,4,5]
; AVX512-NEXT:    vpshufd {{.*#+}} ymm4 = ymm1[3,0,1,2,7,4,5,6]
; AVX512-NEXT:    vpermq {{.*#+}} ymm5 = ymm1[2,3,0,1]
; AVX512-NEXT:    vpermq {{.*#+}} ymm6 = ymm2[2,3,0,1]
; AVX512-NEXT:    vpermq {{.*#+}} ymm7 = ymm3[2,3,0,1]
; AVX512-NEXT:    vpermq {{.*#+}} ymm8 = ymm4[2,3,0,1]
; AVX512-NEXT:    vpcmpeqd %zmm1, %zmm0, %k0
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm2, %k1
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm3, %k3
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm4, %k2
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm5, %k4
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm6, %k5
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm7, %k6
; AVX512-NEXT:    vpcmpeqd %zmm0, %zmm8, %k7
; AVX512-NEXT:    korw %k0, %k1, %k0
; AVX512-NEXT:    korw %k3, %k0, %k0
; AVX512-NEXT:    korw %k4, %k0, %k0
; AVX512-NEXT:    korw %k2, %k0, %k0
; AVX512-NEXT:    korw %k5, %k0, %k0
; AVX512-NEXT:    korw %k6, %k0, %k0
; AVX512-NEXT:    korw %k7, %k0, %k1
; AVX512-NEXT:    vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512-NEXT:    retq
entry:
  %shuffle = shufflevector <8 x i32> %b0, <8 x i32> poison, <8 x i32> <i32 1, i32 2, i32 3, i32 0, i32 5, i32 6, i32 7, i32 4>
  %shuffle1 = shufflevector <8 x i32> %b0, <8 x i32> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
  %shuffle2 = shufflevector <8 x i32> %shuffle, <8 x i32> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  %shuffle3 = shufflevector <8 x i32> %b0, <8 x i32> poison, <8 x i32> <i32 3, i32 0, i32 1, i32 2, i32 7, i32 4, i32 5, i32 6>
  %shuffle4 = shufflevector <8 x i32> %shuffle, <8 x i32> poison, <8 x i32> <i32 7, i32 4, i32 5, i32 6, i32 3, i32 0, i32 1, i32 2>
  %shuffle5 = shufflevector <8 x i32> %shuffle1, <8 x i32> poison, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 0, i32 1, i32 2, i32 3>
  %shuffle6 = shufflevector <8 x i32> %shuffle2, <8 x i32> poison, <8 x i32> <i32 2, i32 3, i32 0, i32 1, i32 6, i32 7, i32 4, i32 5>
  %cmp = icmp eq <8 x i32> %a0, %b0
  %cmp7 = icmp eq <8 x i32> %shuffle, %a0
  %cmp9 = icmp eq <8 x i32> %shuffle1, %a0
  %cmp11 = icmp eq <8 x i32> %shuffle2, %a0
  %cmp13 = icmp eq <8 x i32> %shuffle3, %a0
  %cmp15 = icmp eq <8 x i32> %shuffle4, %a0
  %cmp17 = icmp eq <8 x i32> %shuffle5, %a0
  %cmp19 = icmp eq <8 x i32> %shuffle6, %a0
  %or2365 = or <8 x i1> %cmp7, %cmp
  %or2264 = or <8 x i1> %or2365, %cmp9
  %or2567 = or <8 x i1> %or2264, %cmp13
  %or2163 = or <8 x i1> %or2567, %cmp11
  %or62 = or <8 x i1> %or2163, %cmp15
  %or2466 = or <8 x i1> %or62, %cmp17
  %or2668 = or <8 x i1> %or2466, %cmp19
  %or26 = sext <8 x i1> %or2668 to <8 x i32>
  ret <8 x i32> %or26
}
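; Note: in PR63946 above, the OR tree of eight <8 x i1> compares stays in
; AVX512 mask registers: each vpcmpeqd writes a k-register, the ORs become
; korw, and a single masked vpternlogd materializes the final sign-extended
; vector.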