; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512F
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512fp16,+avx512vl | FileCheck %s --check-prefixes=AVX512,AVX512FP16

; Each test loads a single f32/f16 scalar, splats it across a vector, and
; fpexts the splat.  The CHECK lines pin the lowering per subtarget: with
; AVX512 broadcast-load conversions ({1toN} embedded broadcasts) the splat
; and conversion fold into one instruction; without f16 hardware support
; the half cases fall back to calling __extendhfsf2.

define <2 x double> @prefer_f32_v2f64(ptr %p) nounwind {
; SSE-LABEL: prefer_f32_v2f64:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: prefer_f32_v2f64:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: prefer_f32_v2f64:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    vcvtps2pd (%rdi){1to2}, %xmm0
; AVX512-NEXT:    retq
entry:
  %0 = load float, ptr %p, align 4
  %vecinit.i = insertelement <2 x float> undef, float %0, i64 0
  %vecinit3.i = shufflevector <2 x float> %vecinit.i, <2 x float> poison, <2 x i32> zeroinitializer
  %conv.i = fpext <2 x float> %vecinit3.i to <2 x double>
  ret <2 x double> %conv.i
}

define <4 x double> @prefer_f32_v4f64(ptr %p) nounwind {
; SSE-LABEL: prefer_f32_v4f64:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: prefer_f32_v4f64:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vbroadcastss (%rdi), %xmm0
; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0
; AVX-NEXT:    retq
;
; AVX512-LABEL: prefer_f32_v4f64:
; AVX512:       # %bb.0: # %entry
; AVX512-NEXT:    vcvtps2pd (%rdi){1to4}, %ymm0
; AVX512-NEXT:    retq
entry:
  %0 = load float, ptr %p, align 4
  %vecinit.i = insertelement <4 x float> undef, float %0, i64 0
  %vecinit3.i = shufflevector <4 x float> %vecinit.i, <4 x float> poison, <4 x i32> zeroinitializer
  %conv.i = fpext <4 x float> %vecinit3.i to <4 x double>
  ret <4 x double> %conv.i
}

define <4 x float> @prefer_f16_v4f32(ptr %p) nounwind {
; SSE-LABEL: prefer_f16_v4f32:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    pushq %rax
; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
; SSE-NEXT:    callq __extendhfsf2@PLT
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT:    popq %rax
; SSE-NEXT:    retq
;
; AVX1-LABEL: prefer_f16_v4f32:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    pushq %rax
; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX1-NEXT:    callq __extendhfsf2@PLT
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT:    popq %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: prefer_f16_v4f32:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    pushq %rax
; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX2-NEXT:    callq __extendhfsf2@PLT
; AVX2-NEXT:    vpbroadcastd %xmm0, %xmm0
; AVX2-NEXT:    popq %rax
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: prefer_f16_v4f32:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT:    retq
;
; AVX512FP16-LABEL: prefer_f16_v4f32:
; AVX512FP16:       # %bb.0: # %entry
; AVX512FP16-NEXT:    vcvtph2psx (%rdi){1to4}, %xmm0
; AVX512FP16-NEXT:    retq
entry:
  %0 = load half, ptr %p, align 4
  %vecinit.i = insertelement <4 x half> undef, half %0, i64 0
  %vecinit3.i = shufflevector <4 x half> %vecinit.i, <4 x half> poison, <4 x i32> zeroinitializer
  %conv.i = fpext <4 x half> %vecinit3.i to <4 x float>
  ret <4 x float> %conv.i
}

define <8 x float> @prefer_f16_v8f32(ptr %p) nounwind {
; SSE-LABEL: prefer_f16_v8f32:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    pushq %rax
; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
; SSE-NEXT:    callq __extendhfsf2@PLT
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,0,0,0]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    popq %rax
; SSE-NEXT:    retq
;
; AVX1-LABEL: prefer_f16_v8f32:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    pushq %rax
; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX1-NEXT:    callq __extendhfsf2@PLT
; AVX1-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    popq %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: prefer_f16_v8f32:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    pushq %rax
; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX2-NEXT:    callq __extendhfsf2@PLT
; AVX2-NEXT:    vpbroadcastd %xmm0, %ymm0
; AVX2-NEXT:    popq %rax
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: prefer_f16_v8f32:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
; AVX512F-NEXT:    vcvtph2ps %xmm0, %ymm0
; AVX512F-NEXT:    retq
;
; AVX512FP16-LABEL: prefer_f16_v8f32:
; AVX512FP16:       # %bb.0: # %entry
; AVX512FP16-NEXT:    vcvtph2psx (%rdi){1to8}, %ymm0
; AVX512FP16-NEXT:    retq
entry:
  %0 = load half, ptr %p, align 4
  %vecinit.i = insertelement <8 x half> undef, half %0, i64 0
  %vecinit3.i = shufflevector <8 x half> %vecinit.i, <8 x half> poison, <8 x i32> zeroinitializer
  %conv.i = fpext <8 x half> %vecinit3.i to <8 x float>
  ret <8 x float> %conv.i
}

define <2 x double> @prefer_f16_v2f64(ptr %p) nounwind {
; SSE-LABEL: prefer_f16_v2f64:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    pushq %rax
; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
; SSE-NEXT:    callq __extendhfsf2@PLT
; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE-NEXT:    popq %rax
; SSE-NEXT:    retq
;
; AVX-LABEL: prefer_f16_v2f64:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    pushq %rax
; AVX-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX-NEXT:    callq __extendhfsf2@PLT
; AVX-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT:    popq %rax
; AVX-NEXT:    retq
;
; AVX512F-LABEL: prefer_f16_v2f64:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
; AVX512F-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX512F-NEXT:    vpblendd {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT:    vcvtps2pd %xmm0, %xmm0
; AVX512F-NEXT:    retq
;
; AVX512FP16-LABEL: prefer_f16_v2f64:
; AVX512FP16:       # %bb.0: # %entry
; AVX512FP16-NEXT:    vcvtph2pd (%rdi){1to2}, %xmm0
; AVX512FP16-NEXT:    retq
entry:
  %0 = load half, ptr %p, align 4
  %vecinit.i = insertelement <2 x half> undef, half %0, i64 0
  %vecinit3.i = shufflevector <2 x half> %vecinit.i, <2 x half> poison, <2 x i32> zeroinitializer
  %conv.i = fpext <2 x half> %vecinit3.i to <2 x double>
  ret <2 x double> %conv.i
}

define <4 x double> @prefer_f16_v4f64(ptr %p) nounwind {
; SSE-LABEL: prefer_f16_v4f64:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    pushq %rax
; SSE-NEXT:    pinsrw $0, (%rdi), %xmm0
; SSE-NEXT:    callq __extendhfsf2@PLT
; SSE-NEXT:    cvtss2sd %xmm0, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm0 = xmm0[0,0]
; SSE-NEXT:    movaps %xmm0, %xmm1
; SSE-NEXT:    popq %rax
; SSE-NEXT:    retq
;
; AVX1-LABEL: prefer_f16_v4f64:
; AVX1:       # %bb.0: # %entry
; AVX1-NEXT:    pushq %rax
; AVX1-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX1-NEXT:    callq __extendhfsf2@PLT
; AVX1-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX1-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX1-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX1-NEXT:    popq %rax
; AVX1-NEXT:    retq
;
; AVX2-LABEL: prefer_f16_v4f64:
; AVX2:       # %bb.0: # %entry
; AVX2-NEXT:    pushq %rax
; AVX2-NEXT:    vpinsrw $0, (%rdi), %xmm0, %xmm0
; AVX2-NEXT:    callq __extendhfsf2@PLT
; AVX2-NEXT:    vcvtss2sd %xmm0, %xmm0, %xmm0
; AVX2-NEXT:    vbroadcastsd %xmm0, %ymm0
; AVX2-NEXT:    popq %rax
; AVX2-NEXT:    retq
;
; AVX512F-LABEL: prefer_f16_v4f64:
; AVX512F:       # %bb.0: # %entry
; AVX512F-NEXT:    vpbroadcastw (%rdi), %xmm0
; AVX512F-NEXT:    vcvtph2ps %xmm0, %xmm0
; AVX512F-NEXT:    vcvtps2pd %xmm0, %ymm0
; AVX512F-NEXT:    retq
;
; AVX512FP16-LABEL: prefer_f16_v4f64:
; AVX512FP16:       # %bb.0: # %entry
; AVX512FP16-NEXT:    vcvtph2pd (%rdi){1to4}, %ymm0
; AVX512FP16-NEXT:    retq
entry:
  %0 = load half, ptr %p, align 4
  %vecinit.i = insertelement <4 x half> undef, half %0, i64 0
  %vecinit3.i = shufflevector <4 x half> %vecinit.i, <4 x half> poison, <4 x i32> zeroinitializer
  %conv.i = fpext <4 x half> %vecinit3.i to <4 x double>
  ret <4 x double> %conv.i
}