; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+egpr -show-mc-encoding | FileCheck --check-prefix=AVX512 %s
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512f,+avx512bw,+egpr -show-mc-encoding | FileCheck --check-prefix=AVX512BW %s

define void @kmovkr_1(i1 %cmp23.not) {
; AVX512-LABEL: kmovkr_1:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    kmovw %edi, %k1 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x92,0xcf]
; AVX512-NEXT:    vmovsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xff,0x89,0x10,0x05,A,A,A,A]
; AVX512-NEXT:    # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; AVX512-NEXT:    vmovsd %xmm0, 0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x04,0x25,0x00,0x00,0x00,0x00]
; AVX512-NEXT:    retq # encoding: [0xc3]
;
; AVX512BW-LABEL: kmovkr_1:
; AVX512BW:       # %bb.0: # %entry
; AVX512BW-NEXT:    kmovd %edi, %k1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x92,0xcf]
; AVX512BW-NEXT:    vmovsd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0 {%k1} {z} # encoding: [0x62,0xf1,0xff,0x89,0x10,0x05,A,A,A,A]
; AVX512BW-NEXT:    # fixup A - offset: 6, value: {{\.?LCPI[0-9]+_[0-9]+}}-4, kind: reloc_riprel_4byte
; AVX512BW-NEXT:    vmovsd %xmm0, 0 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x11,0x04,0x25,0x00,0x00,0x00,0x00]
; AVX512BW-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = select i1 %cmp23.not, double 1.000000e+00, double 0.000000e+00
  store double %0, ptr null, align 8
  ret void
}

define void @kmovkr_2() {
; AVX512-LABEL: kmovkr_2:
; AVX512:       # %bb.0: # %alloca_21
; AVX512-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc0]
; AVX512-NEXT:    movw $3, %ax # encoding: [0x66,0xb8,0x03,0x00]
; AVX512-NEXT:    kmovw %eax, %k1 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x92,0xc8]
; AVX512-NEXT:    vmovups %zmm0, 0 {%k1} # encoding: [0x62,0xf1,0x7c,0x49,0x11,0x04,0x25,0x00,0x00,0x00,0x00]
; AVX512-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512-NEXT:    retq # encoding: [0xc3]
;
; AVX512BW-LABEL: kmovkr_2:
; AVX512BW:       # %bb.0: # %alloca_21
; AVX512BW-NEXT:    vxorps %xmm0, %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x57,0xc0]
; AVX512BW-NEXT:    movw $3, %ax # encoding: [0x66,0xb8,0x03,0x00]
; AVX512BW-NEXT:    kmovd %eax, %k1 # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x92,0xc8]
; AVX512BW-NEXT:    vmovups %zmm0, 0 {%k1} # encoding: [0x62,0xf1,0x7c,0x49,0x11,0x04,0x25,0x00,0x00,0x00,0x00]
; AVX512BW-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512BW-NEXT:    retq # encoding: [0xc3]
alloca_21:
  call void @llvm.masked.store.v4f32.p0(<4 x float> zeroinitializer, ptr null, i32 1, <4 x i1> <i1 true, i1 true, i1 false, i1 false>)
  ret void
}

define i32 @kmovrk_1(<4 x ptr> %arg) {
; AVX512-LABEL: kmovrk_1:
; AVX512:       # %bb.0: # %bb
; AVX512-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512-NEXT:    vptestmq %zmm0, %zmm0, %k0 # encoding: [0x62,0xf2,0xfd,0x48,0x27,0xc0]
; AVX512-NEXT:    kmovw %k0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x93,0xc0]
; AVX512-NEXT:    testb $15, %al # encoding: [0xa8,0x0f]
; AVX512-NEXT:    jne .LBB2_1 # encoding: [0x75,A]
; AVX512-NEXT:    # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
; AVX512-NEXT:  # %bb.2: # %bb3
; AVX512-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512-NEXT:    retq # encoding: [0xc3]
; AVX512-NEXT:  .LBB2_1: # %bb2
;
; AVX512BW-LABEL: kmovrk_1:
; AVX512BW:       # %bb.0: # %bb
; AVX512BW-NEXT:    # kill: def $ymm0 killed $ymm0 def $zmm0
; AVX512BW-NEXT:    vptestmq %zmm0, %zmm0, %k0 # encoding: [0x62,0xf2,0xfd,0x48,0x27,0xc0]
; AVX512BW-NEXT:    kmovd %k0, %eax # EVEX TO VEX Compression encoding: [0xc5,0xfb,0x93,0xc0]
; AVX512BW-NEXT:    testb $15, %al # encoding: [0xa8,0x0f]
; AVX512BW-NEXT:    jne .LBB2_1 # encoding: [0x75,A]
; AVX512BW-NEXT:    # fixup A - offset: 1, value: .LBB2_1-1, kind: FK_PCRel_1
; AVX512BW-NEXT:  # %bb.2: # %bb3
; AVX512BW-NEXT:    xorl %eax, %eax # encoding: [0x31,0xc0]
; AVX512BW-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512BW-NEXT:    retq # encoding: [0xc3]
; AVX512BW-NEXT:  .LBB2_1: # %bb2
bb:
  %icmp = icmp ne <4 x ptr> %arg, zeroinitializer
  %freeze = freeze <4 x i1> %icmp
  %bitcast = bitcast <4 x i1> %freeze to i4
  %icmp1 = icmp ne i4 %bitcast, 0
  br i1 %icmp1, label %bb2, label %bb3

bb2:
  unreachable

bb3:
  ret i32 0
}

declare void @llvm.masked.store.v4f32.p0(<4 x float>, ptr nocapture, i32 immarg, <4 x i1>)