; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvbc \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvbc \
; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64

declare <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmul_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmul_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT:    vclmul.vv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vclmul.vv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmul_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmul_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT:    vclmul.vv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vclmul.vv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmul_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmul_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT:    vclmul.vv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vclmul.vv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmul_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, iXLen %2) nounwind {
; CHECK-LABEL: intrinsic_vclmul_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT:    vclmul.vv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; CHECK-LABEL: intrinsic_vclmul_mask_vv_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl8re64.v v24, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vclmul.vv v8, v16, v24, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i64> %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmul_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmul_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT:    vlse64.v v9, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v8, v9
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT:    vclmul.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64(
    <vscale x 1 x i64> undef,
    <vscale x 1 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  iXLen,
  iXLen)

define <vscale x 1 x i64> @intrinsic_vclmul_mask_vx_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v9, v10, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv1i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; RV64-NEXT:    vclmul.vx v8, v9, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmul_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmul_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT:    vlse64.v v10, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v8, v10
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT:    vclmul.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64(
    <vscale x 2 x i64> undef,
    <vscale x 2 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  iXLen,
  iXLen)

define <vscale x 2 x i64> @intrinsic_vclmul_mask_vx_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v10, v12, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv2i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; RV64-NEXT:    vclmul.vx v8, v10, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmul_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmul_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT:    vlse64.v v12, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v8, v12
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT:    vclmul.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64(
    <vscale x 4 x i64> undef,
    <vscale x 4 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  iXLen,
  iXLen)

define <vscale x 4 x i64> @intrinsic_vclmul_mask_vx_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v12, v16, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv4i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; RV64-NEXT:    vclmul.vx v8, v12, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmul_vx_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, iXLen %2) nounwind {
; RV32-LABEL: intrinsic_vclmul_vx_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v8, v16
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_vx_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vclmul.vx v8, v8, a0
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64(
    <vscale x 8 x i64> undef,
    <vscale x 8 x i64> %0,
    i64 %1,
    iXLen %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  iXLen,
  iXLen)

define <vscale x 8 x i64> @intrinsic_vclmul_mask_vx_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
; RV32-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64:
; RV32:       # %bb.0: # %entry
; RV32-NEXT:    addi sp, sp, -16
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a0, 8(sp)
; RV32-NEXT:    addi a0, sp, 8
; RV32-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; RV32-NEXT:    vlse64.v v24, (a0), zero
; RV32-NEXT:    vclmul.vv v8, v16, v24, v0.t
; RV32-NEXT:    addi sp, sp, 16
; RV32-NEXT:    ret
;
; RV64-LABEL: intrinsic_vclmul_mask_vx_nxv8i64_i64:
; RV64:       # %bb.0: # %entry
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; RV64-NEXT:    vclmul.vx v8, v16, a0, v0.t
; RV64-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2,
    <vscale x 8 x i1> %3,
    iXLen %4, iXLen 1)

  ret <vscale x 8 x i64> %a
}