; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
; RUN: llc -mtriple=riscv32 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=ilp32d < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d \
; RUN:   -verify-machineinstrs -target-abi=lp64d < %s \
; RUN:   | FileCheck -check-prefixes=CHECK,RV64IFD %s

declare half @llvm.exp10.f16(half)
declare <1 x half> @llvm.exp10.v1f16(<1 x half>)
declare <2 x half> @llvm.exp10.v2f16(<2 x half>)
declare <3 x half> @llvm.exp10.v3f16(<3 x half>)
declare <4 x half> @llvm.exp10.v4f16(<4 x half>)
declare float @llvm.exp10.f32(float)
declare <1 x float> @llvm.exp10.v1f32(<1 x float>)
declare <2 x float> @llvm.exp10.v2f32(<2 x float>)
declare <3 x float> @llvm.exp10.v3f32(<3 x float>)
declare <4 x float> @llvm.exp10.v4f32(<4 x float>)
declare double @llvm.exp10.f64(double)
declare <1 x double> @llvm.exp10.v1f64(<1 x double>)
declare <2 x double> @llvm.exp10.v2f64(<2 x double>)
declare <3 x double> @llvm.exp10.v3f64(<3 x double>)
declare <4 x double> @llvm.exp10.v4f64(<4 x double>)

define half @exp10_f16(half %x) {
; RV32IFD-LABEL: exp10_f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    lui a1, 1048560
; RV32IFD-NEXT:    or a0, a0, a1
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    lui a1, 1048560
; RV64IFD-NEXT:    or a0, a0, a1
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %r = call half @llvm.exp10.f16(half %x)
  ret half %r
}

define <1 x half> @exp10_v1f16(<1 x half> %x) {
; RV32IFD-LABEL: exp10_v1f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v1f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %r = call <1 x half> @llvm.exp10.v1f16(<1 x half> %x)
  ret <1 x half> %r
}
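
; The multi-element half cases below are scalarized: each element is widened
; with __extendhfsf2, passed to the exp10f libcall, and narrowed back with
; __truncsfhf2, with callee-saved registers carrying the remaining elements
; across the calls.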
define <2 x half> @exp10_v2f16(<2 x half> %x) {
; RV32IFD-LABEL: exp10_v2f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 8(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    fmv.w.x fs0, a1
; RV32IFD-NEXT:    fmv.w.x fa0, a0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w s0, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a1, fa0
; RV32IFD-NEXT:    mv a0, s0
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    mv s0, a1
; RV64IFD-NEXT:    fmv.w.x fa0, a0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w s1, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s0
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a1, fa0
; RV64IFD-NEXT:    mv a0, s1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %r = call <2 x half> @llvm.exp10.v2f16(<2 x half> %x)
  ret <2 x half> %r
}
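
; For v3f16 the result is returned indirectly through the pointer in a0
; (saved in s0): the low two results are packed into one 32-bit word and
; stored with sw, and the third element is stored separately with sh.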
define <3 x half> @exp10_v3f16(<3 x half> %x) {
; RV32IFD-LABEL: exp10_v3f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 36(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset s1, -12
; RV32IFD-NEXT:    .cfi_offset fs0, -24
; RV32IFD-NEXT:    .cfi_offset fs1, -32
; RV32IFD-NEXT:    .cfi_offset fs2, -40
; RV32IFD-NEXT:    lhu a2, 8(a1)
; RV32IFD-NEXT:    lhu a3, 0(a1)
; RV32IFD-NEXT:    lhu a1, 4(a1)
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    fmv.w.x fs0, a2
; RV32IFD-NEXT:    fmv.w.x fs1, a3
; RV32IFD-NEXT:    fmv.w.x fa0, a1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.x.w a0, fs2
; RV32IFD-NEXT:    slli s1, a0, 16
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    slli a0, a0, 16
; RV32IFD-NEXT:    srli a0, a0, 16
; RV32IFD-NEXT:    or s1, a0, s1
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    sh a0, 4(s0)
; RV32IFD-NEXT:    sw s1, 0(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 36(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s2, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset s2, -32
; RV64IFD-NEXT:    .cfi_offset fs0, -40
; RV64IFD-NEXT:    lhu s1, 16(a1)
; RV64IFD-NEXT:    lhu s2, 0(a1)
; RV64IFD-NEXT:    lhu a1, 8(a1)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.w.x fa0, a1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs0, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fs0
; RV64IFD-NEXT:    slli s2, a0, 16
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli a0, a0, 48
; RV64IFD-NEXT:    srli a0, a0, 48
; RV64IFD-NEXT:    or s2, a0, s2
; RV64IFD-NEXT:    fmv.w.x fa0, s1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    sh a0, 4(s0)
; RV64IFD-NEXT:    sw s2, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s2, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <3 x half> @llvm.exp10.v3f16(<3 x half> %x)
  ret <3 x half> %r
}

define <4 x half> @exp10_v4f16(<4 x half> %x) {
; RV32IFD-LABEL: exp10_v4f16:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -64
; RV32IFD-NEXT:    .cfi_def_cfa_offset 64
; RV32IFD-NEXT:    sw ra, 60(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 56(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s1, 52(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s2, 48(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s3, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset s1, -12
; RV32IFD-NEXT:    .cfi_offset s2, -16
; RV32IFD-NEXT:    .cfi_offset s3, -20
; RV32IFD-NEXT:    .cfi_offset fs0, -32
; RV32IFD-NEXT:    .cfi_offset fs1, -40
; RV32IFD-NEXT:    .cfi_offset fs2, -48
; RV32IFD-NEXT:    .cfi_offset fs3, -56
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    lhu a0, 12(a1)
; RV32IFD-NEXT:    lhu a2, 0(a1)
; RV32IFD-NEXT:    lhu a3, 4(a1)
; RV32IFD-NEXT:    lhu a1, 8(a1)
; RV32IFD-NEXT:    fmv.w.x fs0, a0
; RV32IFD-NEXT:    fmv.w.x fs1, a2
; RV32IFD-NEXT:    fmv.w.x fs2, a3
; RV32IFD-NEXT:    fmv.w.x fa0, a1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs3, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs2
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call __extendhfsf2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.x.w s1, fs1
; RV32IFD-NEXT:    fmv.x.w s2, fs2
; RV32IFD-NEXT:    fmv.x.w s3, fs3
; RV32IFD-NEXT:    call __truncsfhf2
; RV32IFD-NEXT:    fmv.x.w a0, fa0
; RV32IFD-NEXT:    sh a0, 6(s0)
; RV32IFD-NEXT:    sh s3, 4(s0)
; RV32IFD-NEXT:    sh s2, 2(s0)
; RV32IFD-NEXT:    sh s1, 0(s0)
; RV32IFD-NEXT:    lw ra, 60(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 56(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s1, 52(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s2, 48(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s3, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 64
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f16:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -64
; RV64IFD-NEXT:    .cfi_def_cfa_offset 64
; RV64IFD-NEXT:    sd ra, 56(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 48(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s2, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s3, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset s2, -32
; RV64IFD-NEXT:    .cfi_offset s3, -40
; RV64IFD-NEXT:    .cfi_offset fs0, -48
; RV64IFD-NEXT:    .cfi_offset fs1, -56
; RV64IFD-NEXT:    .cfi_offset fs2, -64
; RV64IFD-NEXT:    lhu s1, 24(a1)
; RV64IFD-NEXT:    lhu s2, 0(a1)
; RV64IFD-NEXT:    lhu s3, 8(a1)
; RV64IFD-NEXT:    lhu a1, 16(a1)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.w.x fa0, a1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs0, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s3
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s2
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.s fs2, fa0
; RV64IFD-NEXT:    fmv.w.x fa0, s1
; RV64IFD-NEXT:    call __extendhfsf2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w s1, fs2
; RV64IFD-NEXT:    fmv.x.w s2, fs1
; RV64IFD-NEXT:    fmv.x.w s3, fs0
; RV64IFD-NEXT:    call __truncsfhf2
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    sh a0, 6(s0)
; RV64IFD-NEXT:    sh s3, 4(s0)
; RV64IFD-NEXT:    sh s2, 2(s0)
; RV64IFD-NEXT:    sh s1, 0(s0)
; RV64IFD-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 48(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s2, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s3, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 64
; RV64IFD-NEXT:    ret
  %r = call <4 x half> @llvm.exp10.v4f16(<4 x half> %x)
  ret <4 x half> %r
}
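
; Scalar f32 matches the libcall ABI directly (argument and result both in
; fa0), so exp10f is emitted as a tail call with no stack frame.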
define float @exp10_f32(float %x) {
; CHECK-LABEL: exp10_f32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    tail exp10f
  %r = call float @llvm.exp10.f32(float %x)
  ret float %r
}

define <1 x float> @exp10_v1f32(<1 x float> %x) {
; RV32IFD-LABEL: exp10_v1f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    .cfi_def_cfa_offset 16
; RV32IFD-NEXT:    sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v1f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    .cfi_def_cfa_offset 16
; RV64IFD-NEXT:    sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %r = call <1 x float> @llvm.exp10.v1f32(<1 x float> %x)
  ret <1 x float> %r
}

define <2 x float> @exp10_v2f32(<2 x float> %x) {
; RV32IFD-LABEL: exp10_v2f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    fmv.s fs0, fa1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fa1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset fs0, -16
; RV64IFD-NEXT:    .cfi_offset fs1, -24
; RV64IFD-NEXT:    fmv.s fs0, fa1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fa1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %r = call <2 x float> @llvm.exp10.v2f32(<2 x float> %x)
  ret <2 x float> %r
}
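
; For v3f32, RV64 packs the first two results into a single 64-bit store
; (slli/srli/or, then sd) and stores the third with fsw; RV32 stores all
; three elements with fsw.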
define <3 x float> @exp10_v3f32(<3 x float> %x) {
; RV32IFD-LABEL: exp10_v3f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    fmv.s fs0, fa2
; RV32IFD-NEXT:    fmv.s fs1, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fsw fa0, 8(s0)
; RV32IFD-NEXT:    fsw fs1, 4(s0)
; RV32IFD-NEXT:    fsw fs2, 0(s0)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s1, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset s1, -24
; RV64IFD-NEXT:    .cfi_offset fs0, -32
; RV64IFD-NEXT:    .cfi_offset fs1, -40
; RV64IFD-NEXT:    fmv.s fs0, fa2
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    fmv.s fa0, fa1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli s1, a0, 32
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.x.w a0, fa0
; RV64IFD-NEXT:    slli a0, a0, 32
; RV64IFD-NEXT:    srli a0, a0, 32
; RV64IFD-NEXT:    or s1, a0, s1
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fsw fa0, 8(s0)
; RV64IFD-NEXT:    sd s1, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s1, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <3 x float> @llvm.exp10.v3f32(<3 x float> %x)
  ret <3 x float> %r
}

define <4 x float> @exp10_v4f32(<4 x float> %x) {
; RV32IFD-LABEL: exp10_v4f32:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    .cfi_offset fs3, -40
; RV32IFD-NEXT:    fmv.s fs0, fa3
; RV32IFD-NEXT:    fmv.s fs1, fa2
; RV32IFD-NEXT:    fmv.s fs2, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs3, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs2
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs2, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs1
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fmv.s fs1, fa0
; RV32IFD-NEXT:    fmv.s fa0, fs0
; RV32IFD-NEXT:    call exp10f
; RV32IFD-NEXT:    fsw fa0, 12(s0)
; RV32IFD-NEXT:    fsw fs1, 8(s0)
; RV32IFD-NEXT:    fsw fs2, 4(s0)
; RV32IFD-NEXT:    fsw fs3, 0(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f32:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs3, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    .cfi_offset fs3, -48
; RV64IFD-NEXT:    fmv.s fs0, fa3
; RV64IFD-NEXT:    fmv.s fs1, fa2
; RV64IFD-NEXT:    fmv.s fs2, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs3, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs2
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs2, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs1
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fmv.s fs1, fa0
; RV64IFD-NEXT:    fmv.s fa0, fs0
; RV64IFD-NEXT:    call exp10f
; RV64IFD-NEXT:    fsw fa0, 12(s0)
; RV64IFD-NEXT:    fsw fs1, 8(s0)
; RV64IFD-NEXT:    fsw fs2, 4(s0)
; RV64IFD-NEXT:    fsw fs3, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <4 x float> @llvm.exp10.v4f32(<4 x float> %x)
  ret <4 x float> %r
}
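
; Scalar f64 likewise reduces to a tail call of the exp10 libcall.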
define double @exp10_f64(double %x) {
; CHECK-LABEL: exp10_f64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    tail exp10
  %r = call double @llvm.exp10.f64(double %x)
  ret double %r
}

; FIXME: Broken
; define <1 x double> @exp10_v1f64(<1 x double> %x) {
;   %r = call <1 x double> @llvm.exp10.v1f64(<1 x double> %x)
;   ret <1 x double> %r
; }

define <2 x double> @exp10_v2f64(<2 x double> %x) {
; RV32IFD-LABEL: exp10_v2f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    fmv.d fs0, fa1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fa1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v2f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    .cfi_def_cfa_offset 32
; RV64IFD-NEXT:    sd ra, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset fs0, -16
; RV64IFD-NEXT:    .cfi_offset fs1, -24
; RV64IFD-NEXT:    fmv.d fs0, fa1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fa1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    ld ra, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %r = call <2 x double> @llvm.exp10.v2f64(<2 x double> %x)
  ret <2 x double> %r
}

define <3 x double> @exp10_v3f64(<3 x double> %x) {
; RV32IFD-LABEL: exp10_v3f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    .cfi_def_cfa_offset 32
; RV32IFD-NEXT:    sw ra, 28(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 24(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 0(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    fmv.d fs0, fa2
; RV32IFD-NEXT:    fmv.d fs1, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs2, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fsd fa0, 16(s0)
; RV32IFD-NEXT:    fsd fs1, 8(s0)
; RV32IFD-NEXT:    fsd fs2, 0(s0)
; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 24(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 0(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v3f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    fmv.d fs0, fa2
; RV64IFD-NEXT:    fmv.d fs1, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs2, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fsd fa0, 16(s0)
; RV64IFD-NEXT:    fsd fs1, 8(s0)
; RV64IFD-NEXT:    fsd fs2, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <3 x double> @llvm.exp10.v3f64(<3 x double> %x)
  ret <3 x double> %r
}

define <4 x double> @exp10_v4f64(<4 x double> %x) {
; RV32IFD-LABEL: exp10_v4f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -48
; RV32IFD-NEXT:    .cfi_def_cfa_offset 48
; RV32IFD-NEXT:    sw ra, 44(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    sw s0, 40(sp) # 4-byte Folded Spill
; RV32IFD-NEXT:    fsd fs0, 32(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs1, 24(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs2, 16(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    fsd fs3, 8(sp) # 8-byte Folded Spill
; RV32IFD-NEXT:    .cfi_offset ra, -4
; RV32IFD-NEXT:    .cfi_offset s0, -8
; RV32IFD-NEXT:    .cfi_offset fs0, -16
; RV32IFD-NEXT:    .cfi_offset fs1, -24
; RV32IFD-NEXT:    .cfi_offset fs2, -32
; RV32IFD-NEXT:    .cfi_offset fs3, -40
; RV32IFD-NEXT:    fmv.d fs0, fa3
; RV32IFD-NEXT:    fmv.d fs1, fa2
; RV32IFD-NEXT:    fmv.d fs2, fa1
; RV32IFD-NEXT:    mv s0, a0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs3, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs2
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs2, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs1
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fmv.d fs1, fa0
; RV32IFD-NEXT:    fmv.d fa0, fs0
; RV32IFD-NEXT:    call exp10
; RV32IFD-NEXT:    fsd fa0, 24(s0)
; RV32IFD-NEXT:    fsd fs1, 16(s0)
; RV32IFD-NEXT:    fsd fs2, 8(s0)
; RV32IFD-NEXT:    fsd fs3, 0(s0)
; RV32IFD-NEXT:    lw ra, 44(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    lw s0, 40(sp) # 4-byte Folded Reload
; RV32IFD-NEXT:    fld fs0, 32(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs1, 24(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs2, 16(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    fld fs3, 8(sp) # 8-byte Folded Reload
; RV32IFD-NEXT:    addi sp, sp, 48
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp10_v4f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -48
; RV64IFD-NEXT:    .cfi_def_cfa_offset 48
; RV64IFD-NEXT:    sd ra, 40(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    sd s0, 32(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs0, 24(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs1, 16(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs2, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    fsd fs3, 0(sp) # 8-byte Folded Spill
; RV64IFD-NEXT:    .cfi_offset ra, -8
; RV64IFD-NEXT:    .cfi_offset s0, -16
; RV64IFD-NEXT:    .cfi_offset fs0, -24
; RV64IFD-NEXT:    .cfi_offset fs1, -32
; RV64IFD-NEXT:    .cfi_offset fs2, -40
; RV64IFD-NEXT:    .cfi_offset fs3, -48
; RV64IFD-NEXT:    fmv.d fs0, fa3
; RV64IFD-NEXT:    fmv.d fs1, fa2
; RV64IFD-NEXT:    fmv.d fs2, fa1
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs3, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs2
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs2, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs1
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fmv.d fs1, fa0
; RV64IFD-NEXT:    fmv.d fa0, fs0
; RV64IFD-NEXT:    call exp10
; RV64IFD-NEXT:    fsd fa0, 24(s0)
; RV64IFD-NEXT:    fsd fs1, 16(s0)
; RV64IFD-NEXT:    fsd fs2, 8(s0)
; RV64IFD-NEXT:    fsd fs3, 0(s0)
; RV64IFD-NEXT:    ld ra, 40(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    ld s0, 32(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs0, 24(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs1, 16(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs2, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    fld fs3, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT:    addi sp, sp, 48
; RV64IFD-NEXT:    ret
  %r = call <4 x double> @llvm.exp10.v4f64(<4 x double> %x)
  ret <4 x double> %r
}