; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING,RV32
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING,RV32
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=1 | FileCheck %s --check-prefixes=NO_FOLDING,RV64
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=2 | FileCheck %s --check-prefixes=NO_FOLDING,RV64
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - --riscv-lower-ext-max-web-size=3 | FileCheck %s --check-prefixes=FOLDING,RV64
; Check that the default value enables the web folding and
; that it is bigger than 3.
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING,RV32
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=FOLDING,RV64

; Check that the scalable vector add/sub/mul operations are all promoted into their
; vw counterparts when the folding web size is increased to 3.
; We need the web size to be at least 3 for the folding to happen, because
; %c has 3 uses.
; See https://github.com/llvm/llvm-project/pull/72340

define <vscale x 2 x i16> @vwop_vscale_sext_i8i16_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_sext_i8i16_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; NO_FOLDING-NEXT: vle8.v v8, (a0)
; NO_FOLDING-NEXT: vle8.v v9, (a1)
; NO_FOLDING-NEXT: vle8.v v10, (a2)
; NO_FOLDING-NEXT: vsext.vf2 v11, v8
; NO_FOLDING-NEXT: vsext.vf2 v8, v9
; NO_FOLDING-NEXT: vsext.vf2 v9, v10
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_sext_i8i16_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
; FOLDING-NEXT: vwmul.vv v11, v8, v9
; FOLDING-NEXT: vwadd.vv v9, v8, v10
; FOLDING-NEXT: vwsub.vv v12, v8, v10
; FOLDING-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vor.vv v8, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i8>, ptr %x
  %b = load <vscale x 2 x i8>, ptr %y
  %b2 = load <vscale x 2 x i8>, ptr %z
  %c = sext <vscale x 2 x i8> %a to <vscale x 2 x i16>
  %d = sext <vscale x 2 x i8> %b to <vscale x 2 x i16>
  %d2 = sext <vscale x 2 x i8> %b2 to <vscale x 2 x i16>
  %e = mul <vscale x 2 x i16> %c, %d
  %f = add <vscale x 2 x i16> %c, %d2
  %g = sub <vscale x 2 x i16> %c, %d2
  %h = or <vscale x 2 x i16> %e, %f
  %i = or <vscale x 2 x i16> %h, %g
  ret <vscale x 2 x i16> %i
}

define <vscale x 2 x i32> @vwop_vscale_sext_i16i32_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_sext_i16i32_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; NO_FOLDING-NEXT: vle16.v v8, (a0)
; NO_FOLDING-NEXT: vle16.v v9, (a1)
; NO_FOLDING-NEXT: vle16.v v10, (a2)
; NO_FOLDING-NEXT: vsext.vf2 v11, v8
; NO_FOLDING-NEXT: vsext.vf2 v8, v9
; NO_FOLDING-NEXT: vsext.vf2 v9, v10
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_sext_i16i32_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle16.v v8, (a0)
; FOLDING-NEXT: vle16.v v9, (a1)
; FOLDING-NEXT: vle16.v v10, (a2)
; FOLDING-NEXT: vwmul.vv v11, v8, v9
; FOLDING-NEXT: vwadd.vv v9, v8, v10
; FOLDING-NEXT: vwsub.vv v12, v8, v10
; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; FOLDING-NEXT: vor.vv v8, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i16>, ptr %x
  %b = load <vscale x 2 x i16>, ptr %y
  %b2 = load <vscale x 2 x i16>, ptr %z
  %c = sext <vscale x 2 x i16> %a to <vscale x 2 x i32>
  %d = sext <vscale x 2 x i16> %b to <vscale x 2 x i32>
  %d2 = sext <vscale x 2 x i16> %b2 to <vscale x 2 x i32>
  %e = mul <vscale x 2 x i32> %c, %d
  %f = add <vscale x 2 x i32> %c, %d2
  %g = sub <vscale x 2 x i32> %c, %d2
  %h = or <vscale x 2 x i32> %e, %f
  %i = or <vscale x 2 x i32> %h, %g
  ret <vscale x 2 x i32> %i
}

define <vscale x 2 x i64> @vwop_vscale_sext_i32i64_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_sext_i32i64_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vl1re32.v v8, (a0)
; NO_FOLDING-NEXT: vl1re32.v v9, (a1)
; NO_FOLDING-NEXT: vl1re32.v v10, (a2)
; NO_FOLDING-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; NO_FOLDING-NEXT: vsext.vf2 v12, v8
; NO_FOLDING-NEXT: vsext.vf2 v14, v9
; NO_FOLDING-NEXT: vsext.vf2 v8, v10
; NO_FOLDING-NEXT: vmul.vv v10, v12, v14
; NO_FOLDING-NEXT: vadd.vv v14, v12, v8
; NO_FOLDING-NEXT: vsub.vv v8, v12, v8
; NO_FOLDING-NEXT: vor.vv v10, v10, v14
; NO_FOLDING-NEXT: vor.vv v8, v10, v8
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_sext_i32i64_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vl1re32.v v8, (a0)
; FOLDING-NEXT: vl1re32.v v9, (a1)
; FOLDING-NEXT: vl1re32.v v10, (a2)
; FOLDING-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; FOLDING-NEXT: vwmul.vv v12, v8, v9
; FOLDING-NEXT: vwadd.vv v14, v8, v10
; FOLDING-NEXT: vwsub.vv v16, v8, v10
; FOLDING-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; FOLDING-NEXT: vor.vv v8, v12, v14
; FOLDING-NEXT: vor.vv v8, v8, v16
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i32>, ptr %x
  %b = load <vscale x 2 x i32>, ptr %y
  %b2 = load <vscale x 2 x i32>, ptr %z
  %c = sext <vscale x 2 x i32> %a to <vscale x 2 x i64>
  %d = sext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  %d2 = sext <vscale x 2 x i32> %b2 to <vscale x 2 x i64>
  %e = mul <vscale x 2 x i64> %c, %d
  %f = add <vscale x 2 x i64> %c, %d2
  %g = sub <vscale x 2 x i64> %c, %d2
  %h = or <vscale x 2 x i64> %e, %f
  %i = or <vscale x 2 x i64> %h, %g
  ret <vscale x 2 x i64> %i
}

define <vscale x 2 x i32> @vwop_vscale_sext_i1i32_multiple_users(ptr %x, ptr %y, ptr %z) {
; RV32-LABEL: vwop_vscale_sext_i1i32_multiple_users:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a3, zero, e32, m1, ta, mu
; RV32-NEXT: vlm.v v8, (a0)
; RV32-NEXT: vlm.v v9, (a1)
; RV32-NEXT: vlm.v v10, (a2)
; RV32-NEXT: vmv.v.i v11, 0
; RV32-NEXT: vmv.v.v v0, v8
; RV32-NEXT: vmerge.vim v12, v11, -1, v0
; RV32-NEXT: vmv.v.v v0, v9
; RV32-NEXT: vmerge.vim v9, v11, -1, v0
; RV32-NEXT: vmv.v.v v0, v10
; RV32-NEXT: vmerge.vim v10, v11, -1, v0
; RV32-NEXT: vmul.vv v9, v12, v9
; RV32-NEXT: li a0, 1
; RV32-NEXT: vsub.vv v11, v12, v10
; RV32-NEXT: vmv.v.v v0, v8
; RV32-NEXT: vsub.vx v10, v10, a0, v0.t
; RV32-NEXT: vor.vv v8, v9, v10
; RV32-NEXT: vor.vv v8, v8, v11
; RV32-NEXT: ret
;
; RV64-LABEL: vwop_vscale_sext_i1i32_multiple_users:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; RV64-NEXT: vlm.v v8, (a0)
; RV64-NEXT: vlm.v v9, (a1)
; RV64-NEXT: vlm.v v10, (a2)
; RV64-NEXT: vmv.v.i v11, 0
; RV64-NEXT: vmv.v.v v0, v8
; RV64-NEXT: vmerge.vim v12, v11, -1, v0
; RV64-NEXT: vmv.v.v v0, v9
; RV64-NEXT: vmerge.vim v9, v11, -1, v0
; RV64-NEXT: vmv.v.v v0, v10
; RV64-NEXT: vmerge.vim v10, v11, -1, v0
; RV64-NEXT: vmul.vv v9, v12, v9
; RV64-NEXT: vmv.v.v v0, v8
; RV64-NEXT: vmerge.vim v8, v11, 1, v0
; RV64-NEXT: vsub.vv v8, v10, v8
; RV64-NEXT: vsub.vv v10, v12, v10
; RV64-NEXT: vor.vv v8, v9, v8
; RV64-NEXT: vor.vv v8, v8, v10
; RV64-NEXT: ret
  %a = load <vscale x 2 x i1>, ptr %x
  %b = load <vscale x 2 x i1>, ptr %y
  %b2 = load <vscale x 2 x i1>, ptr %z
  %c = sext <vscale x 2 x i1> %a to <vscale x 2 x i32>
  %d = sext <vscale x 2 x i1> %b to <vscale x 2 x i32>
  %d2 = sext <vscale x 2 x i1> %b2 to <vscale x 2 x i32>
  %e = mul <vscale x 2 x i32> %c, %d
  %f = add <vscale x 2 x i32> %c, %d2
  %g = sub <vscale x 2 x i32> %c, %d2
  %h = or <vscale x 2 x i32> %e, %f
  %i = or <vscale x 2 x i32> %h, %g
  ret <vscale x 2 x i32> %i
}

define <vscale x 2 x i8> @vwop_vscale_sext_i1i8_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_sext_i1i8_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; NO_FOLDING-NEXT: vlm.v v8, (a0)
; NO_FOLDING-NEXT: vlm.v v9, (a1)
; NO_FOLDING-NEXT: vlm.v v10, (a2)
; NO_FOLDING-NEXT: vmv.v.i v11, 0
; NO_FOLDING-NEXT: vmv1r.v v0, v8
; NO_FOLDING-NEXT: vmerge.vim v12, v11, -1, v0
; NO_FOLDING-NEXT: vmv1r.v v0, v9
; NO_FOLDING-NEXT: vmerge.vim v9, v11, -1, v0
; NO_FOLDING-NEXT: vmv1r.v v0, v10
; NO_FOLDING-NEXT: vmerge.vim v10, v11, -1, v0
; NO_FOLDING-NEXT: vmul.vv v9, v12, v9
; NO_FOLDING-NEXT: vmv1r.v v0, v8
; NO_FOLDING-NEXT: vmerge.vim v8, v11, 1, v0
; NO_FOLDING-NEXT: vsub.vv v8, v10, v8
; NO_FOLDING-NEXT: vsub.vv v10, v12, v10
; NO_FOLDING-NEXT: vor.vv v8, v9, v8
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_sext_i1i8_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; FOLDING-NEXT: vlm.v v8, (a0)
; FOLDING-NEXT: vlm.v v9, (a1)
; FOLDING-NEXT: vlm.v v10, (a2)
; FOLDING-NEXT: vmv.v.i v11, 0
; FOLDING-NEXT: vmv1r.v v0, v8
; FOLDING-NEXT: vmerge.vim v12, v11, -1, v0
; FOLDING-NEXT: vmv1r.v v0, v9
; FOLDING-NEXT: vmerge.vim v9, v11, -1, v0
; FOLDING-NEXT: vmv1r.v v0, v10
; FOLDING-NEXT: vmerge.vim v10, v11, -1, v0
; FOLDING-NEXT: vmul.vv v9, v12, v9
; FOLDING-NEXT: vmv1r.v v0, v8
; FOLDING-NEXT: vmerge.vim v8, v11, 1, v0
; FOLDING-NEXT: vsub.vv v8, v10, v8
; FOLDING-NEXT: vsub.vv v10, v12, v10
; FOLDING-NEXT: vor.vv v8, v9, v8
; FOLDING-NEXT: vor.vv v8, v8, v10
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i1>, ptr %x
  %b = load <vscale x 2 x i1>, ptr %y
  %b2 = load <vscale x 2 x i1>, ptr %z
  %c = sext <vscale x 2 x i1> %a to <vscale x 2 x i8>
  %d = sext <vscale x 2 x i1> %b to <vscale x 2 x i8>
  %d2 = sext <vscale x 2 x i1> %b2 to <vscale x 2 x i8>
  %e = mul <vscale x 2 x i8> %c, %d
  %f = add <vscale x 2 x i8> %c, %d2
  %g = sub <vscale x 2 x i8> %c, %d2
  %h = or <vscale x 2 x i8> %e, %f
  %i = or <vscale x 2 x i8> %h, %g
  ret <vscale x 2 x i8> %i
}

define <vscale x 2 x i32> @vwop_vscale_sext_i8i32_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_sext_i8i32_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; NO_FOLDING-NEXT: vle8.v v8, (a0)
; NO_FOLDING-NEXT: vle8.v v9, (a1)
; NO_FOLDING-NEXT: vle8.v v10, (a2)
; NO_FOLDING-NEXT: vsext.vf4 v11, v8
; NO_FOLDING-NEXT: vsext.vf4 v8, v9
; NO_FOLDING-NEXT: vsext.vf4 v9, v10
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_sext_i8i32_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
; FOLDING-NEXT: vsext.vf4 v11, v8
; FOLDING-NEXT: vsext.vf4 v8, v9
; FOLDING-NEXT: vsext.vf4 v9, v10
; FOLDING-NEXT: vmul.vv v8, v11, v8
; FOLDING-NEXT: vadd.vv v10, v11, v9
; FOLDING-NEXT: vsub.vv v9, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v10
; FOLDING-NEXT: vor.vv v8, v8, v9
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i8>, ptr %x
  %b = load <vscale x 2 x i8>, ptr %y
  %b2 = load <vscale x 2 x i8>, ptr %z
  %c = sext <vscale x 2 x i8> %a to <vscale x 2 x i32>
  %d = sext <vscale x 2 x i8> %b to <vscale x 2 x i32>
  %d2 = sext <vscale x 2 x i8> %b2 to <vscale x 2 x i32>
  %e = mul <vscale x 2 x i32> %c, %d
  %f = add <vscale x 2 x i32> %c, %d2
  %g = sub <vscale x 2 x i32> %c, %d2
  %h = or <vscale x 2 x i32> %e, %f
  %i = or <vscale x 2 x i32> %h, %g
  ret <vscale x 2 x i32> %i
}

define <vscale x 2 x i16> @vwop_vscale_zext_i8i16_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_zext_i8i16_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; NO_FOLDING-NEXT: vle8.v v8, (a0)
; NO_FOLDING-NEXT: vle8.v v9, (a1)
; NO_FOLDING-NEXT: vle8.v v10, (a2)
; NO_FOLDING-NEXT: vzext.vf2 v11, v8
; NO_FOLDING-NEXT: vzext.vf2 v8, v9
; NO_FOLDING-NEXT: vzext.vf2 v9, v10
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_zext_i8i16_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
; FOLDING-NEXT: vwmulu.vv v11, v8, v9
; FOLDING-NEXT: vwaddu.vv v9, v8, v10
; FOLDING-NEXT: vwsubu.vv v12, v8, v10
; FOLDING-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vor.vv v8, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i8>, ptr %x
  %b = load <vscale x 2 x i8>, ptr %y
  %b2 = load <vscale x 2 x i8>, ptr %z
  %c = zext <vscale x 2 x i8> %a to <vscale x 2 x i16>
  %d = zext <vscale x 2 x i8> %b to <vscale x 2 x i16>
  %d2 = zext <vscale x 2 x i8> %b2 to <vscale x 2 x i16>
  %e = mul <vscale x 2 x i16> %c, %d
  %f = add <vscale x 2 x i16> %c, %d2
  %g = sub <vscale x 2 x i16> %c, %d2
  %h = or <vscale x 2 x i16> %e, %f
  %i = or <vscale x 2 x i16> %h, %g
  ret <vscale x 2 x i16> %i
}

define <vscale x 2 x i32> @vwop_vscale_zext_i16i32_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_zext_i16i32_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; NO_FOLDING-NEXT: vle16.v v8, (a0)
; NO_FOLDING-NEXT: vle16.v v9, (a1)
; NO_FOLDING-NEXT: vle16.v v10, (a2)
; NO_FOLDING-NEXT: vzext.vf2 v11, v8
; NO_FOLDING-NEXT: vzext.vf2 v8, v9
; NO_FOLDING-NEXT: vzext.vf2 v9, v10
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_zext_i16i32_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e16, mf2, ta, ma
; FOLDING-NEXT: vle16.v v8, (a0)
; FOLDING-NEXT: vle16.v v9, (a1)
; FOLDING-NEXT: vle16.v v10, (a2)
; FOLDING-NEXT: vwmulu.vv v11, v8, v9
; FOLDING-NEXT: vwaddu.vv v9, v8, v10
; FOLDING-NEXT: vwsubu.vv v12, v8, v10
; FOLDING-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; FOLDING-NEXT: vor.vv v8, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v12
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i16>, ptr %x
  %b = load <vscale x 2 x i16>, ptr %y
  %b2 = load <vscale x 2 x i16>, ptr %z
  %c = zext <vscale x 2 x i16> %a to <vscale x 2 x i32>
  %d = zext <vscale x 2 x i16> %b to <vscale x 2 x i32>
  %d2 = zext <vscale x 2 x i16> %b2 to <vscale x 2 x i32>
  %e = mul <vscale x 2 x i32> %c, %d
  %f = add <vscale x 2 x i32> %c, %d2
  %g = sub <vscale x 2 x i32> %c, %d2
  %h = or <vscale x 2 x i32> %e, %f
  %i = or <vscale x 2 x i32> %h, %g
  ret <vscale x 2 x i32> %i
}

define <vscale x 2 x i64> @vwop_vscale_zext_i32i64_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_zext_i32i64_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vl1re32.v v8, (a0)
; NO_FOLDING-NEXT: vl1re32.v v9, (a1)
; NO_FOLDING-NEXT: vl1re32.v v10, (a2)
; NO_FOLDING-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; NO_FOLDING-NEXT: vzext.vf2 v12, v8
; NO_FOLDING-NEXT: vzext.vf2 v14, v9
; NO_FOLDING-NEXT: vzext.vf2 v8, v10
; NO_FOLDING-NEXT: vmul.vv v10, v12, v14
; NO_FOLDING-NEXT: vadd.vv v14, v12, v8
; NO_FOLDING-NEXT: vsub.vv v8, v12, v8
; NO_FOLDING-NEXT: vor.vv v10, v10, v14
; NO_FOLDING-NEXT: vor.vv v8, v10, v8
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_zext_i32i64_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vl1re32.v v8, (a0)
; FOLDING-NEXT: vl1re32.v v9, (a1)
; FOLDING-NEXT: vl1re32.v v10, (a2)
; FOLDING-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; FOLDING-NEXT: vwmulu.vv v12, v8, v9
; FOLDING-NEXT: vwaddu.vv v14, v8, v10
; FOLDING-NEXT: vwsubu.vv v16, v8, v10
; FOLDING-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; FOLDING-NEXT: vor.vv v8, v12, v14
; FOLDING-NEXT: vor.vv v8, v8, v16
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i32>, ptr %x
  %b = load <vscale x 2 x i32>, ptr %y
  %b2 = load <vscale x 2 x i32>, ptr %z
  %c = zext <vscale x 2 x i32> %a to <vscale x 2 x i64>
  %d = zext <vscale x 2 x i32> %b to <vscale x 2 x i64>
  %d2 = zext <vscale x 2 x i32> %b2 to <vscale x 2 x i64>
  %e = mul <vscale x 2 x i64> %c, %d
  %f = add <vscale x 2 x i64> %c, %d2
  %g = sub <vscale x 2 x i64> %c, %d2
  %h = or <vscale x 2 x i64> %e, %f
  %i = or <vscale x 2 x i64> %h, %g
  ret <vscale x 2 x i64> %i
}

define <vscale x 2 x i32> @vwop_vscale_zext_i1i32_multiple_users(ptr %x, ptr %y, ptr %z) {
; RV32-LABEL: vwop_vscale_zext_i1i32_multiple_users:
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a3, zero, e32, m1, ta, mu
; RV32-NEXT: vlm.v v0, (a0)
; RV32-NEXT: vlm.v v8, (a2)
; RV32-NEXT: vlm.v v9, (a1)
; RV32-NEXT: vmv.v.i v10, 0
; RV32-NEXT: vmerge.vim v11, v10, 1, v0
; RV32-NEXT: vmv.v.v v0, v8
; RV32-NEXT: vmerge.vim v8, v10, 1, v0
; RV32-NEXT: vadd.vv v10, v11, v8
; RV32-NEXT: vsub.vv v8, v11, v8
; RV32-NEXT: vmv.v.v v0, v9
; RV32-NEXT: vor.vv v10, v10, v11, v0.t
; RV32-NEXT: vor.vv v8, v10, v8
; RV32-NEXT: ret
;
; RV64-LABEL: vwop_vscale_zext_i1i32_multiple_users:
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; RV64-NEXT: vlm.v v0, (a0)
; RV64-NEXT: vlm.v v8, (a1)
; RV64-NEXT: vlm.v v9, (a2)
; RV64-NEXT: vmv.v.i v10, 0
; RV64-NEXT: vmerge.vim v11, v10, 1, v0
; RV64-NEXT: vmv.v.v v0, v8
; RV64-NEXT: vmerge.vim v8, v10, 1, v0
; RV64-NEXT: vmv.v.v v0, v9
; RV64-NEXT: vmerge.vim v9, v10, 1, v0
; RV64-NEXT: vmul.vv v8, v11, v8
; RV64-NEXT: vadd.vv v10, v11, v9
; RV64-NEXT: vsub.vv v9, v11, v9
; RV64-NEXT: vor.vv v8, v8, v10
; RV64-NEXT: vor.vv v8, v8, v9
; RV64-NEXT: ret
  %a = load <vscale x 2 x i1>, ptr %x
  %b = load <vscale x 2 x i1>, ptr %y
  %b2 = load <vscale x 2 x i1>, ptr %z
  %c = zext <vscale x 2 x i1> %a to <vscale x 2 x i32>
  %d = zext <vscale x 2 x i1> %b to <vscale x 2 x i32>
  %d2 = zext <vscale x 2 x i1> %b2 to <vscale x 2 x i32>
  %e = mul <vscale x 2 x i32> %c, %d
  %f = add <vscale x 2 x i32> %c, %d2
  %g = sub <vscale x 2 x i32> %c, %d2
  %h = or <vscale x 2 x i32> %e, %f
  %i = or <vscale x 2 x i32> %h, %g
  ret <vscale x 2 x i32> %i
}

define <vscale x 2 x i8> @vwop_vscale_zext_i1i8_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_zext_i1i8_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; NO_FOLDING-NEXT: vlm.v v0, (a0)
; NO_FOLDING-NEXT: vlm.v v8, (a1)
; NO_FOLDING-NEXT: vlm.v v9, (a2)
; NO_FOLDING-NEXT: vmv.v.i v10, 0
; NO_FOLDING-NEXT: vmerge.vim v11, v10, 1, v0
; NO_FOLDING-NEXT: vmv1r.v v0, v8
; NO_FOLDING-NEXT: vmerge.vim v8, v10, 1, v0
; NO_FOLDING-NEXT: vmv1r.v v0, v9
; NO_FOLDING-NEXT: vmerge.vim v9, v10, 1, v0
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_zext_i1i8_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e8, mf4, ta, ma
; FOLDING-NEXT: vlm.v v0, (a0)
; FOLDING-NEXT: vlm.v v8, (a1)
; FOLDING-NEXT: vlm.v v9, (a2)
; FOLDING-NEXT: vmv.v.i v10, 0
; FOLDING-NEXT: vmerge.vim v11, v10, 1, v0
; FOLDING-NEXT: vmv1r.v v0, v8
; FOLDING-NEXT: vmerge.vim v8, v10, 1, v0
; FOLDING-NEXT: vmv1r.v v0, v9
; FOLDING-NEXT: vmerge.vim v9, v10, 1, v0
; FOLDING-NEXT: vmul.vv v8, v11, v8
; FOLDING-NEXT: vadd.vv v10, v11, v9
; FOLDING-NEXT: vsub.vv v9, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v10
; FOLDING-NEXT: vor.vv v8, v8, v9
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i1>, ptr %x
  %b = load <vscale x 2 x i1>, ptr %y
  %b2 = load <vscale x 2 x i1>, ptr %z
  %c = zext <vscale x 2 x i1> %a to <vscale x 2 x i8>
  %d = zext <vscale x 2 x i1> %b to <vscale x 2 x i8>
  %d2 = zext <vscale x 2 x i1> %b2 to <vscale x 2 x i8>
  %e = mul <vscale x 2 x i8> %c, %d
  %f = add <vscale x 2 x i8> %c, %d2
  %g = sub <vscale x 2 x i8> %c, %d2
  %h = or <vscale x 2 x i8> %e, %f
  %i = or <vscale x 2 x i8> %h, %g
  ret <vscale x 2 x i8> %i
}

define <vscale x 2 x i32> @vwop_vscale_zext_i8i32_multiple_users(ptr %x, ptr %y, ptr %z) {
; NO_FOLDING-LABEL: vwop_vscale_zext_i8i32_multiple_users:
; NO_FOLDING: # %bb.0:
; NO_FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; NO_FOLDING-NEXT: vle8.v v8, (a0)
; NO_FOLDING-NEXT: vle8.v v9, (a1)
; NO_FOLDING-NEXT: vle8.v v10, (a2)
; NO_FOLDING-NEXT: vzext.vf4 v11, v8
; NO_FOLDING-NEXT: vzext.vf4 v8, v9
; NO_FOLDING-NEXT: vzext.vf4 v9, v10
; NO_FOLDING-NEXT: vmul.vv v8, v11, v8
; NO_FOLDING-NEXT: vadd.vv v10, v11, v9
; NO_FOLDING-NEXT: vsub.vv v9, v11, v9
; NO_FOLDING-NEXT: vor.vv v8, v8, v10
; NO_FOLDING-NEXT: vor.vv v8, v8, v9
; NO_FOLDING-NEXT: ret
;
; FOLDING-LABEL: vwop_vscale_zext_i8i32_multiple_users:
; FOLDING: # %bb.0:
; FOLDING-NEXT: vsetvli a3, zero, e32, m1, ta, ma
; FOLDING-NEXT: vle8.v v8, (a0)
; FOLDING-NEXT: vle8.v v9, (a1)
; FOLDING-NEXT: vle8.v v10, (a2)
; FOLDING-NEXT: vzext.vf4 v11, v8
; FOLDING-NEXT: vzext.vf4 v8, v9
; FOLDING-NEXT: vzext.vf4 v9, v10
; FOLDING-NEXT: vmul.vv v8, v11, v8
; FOLDING-NEXT: vadd.vv v10, v11, v9
; FOLDING-NEXT: vsub.vv v9, v11, v9
; FOLDING-NEXT: vor.vv v8, v8, v10
; FOLDING-NEXT: vor.vv v8, v8, v9
; FOLDING-NEXT: ret
  %a = load <vscale x 2 x i8>, ptr %x
  %b = load <vscale x 2 x i8>, ptr %y
  %b2 = load <vscale x 2 x i8>, ptr %z
  %c = zext <vscale x 2 x i8> %a to <vscale x 2 x i32>
  %d = zext <vscale x 2 x i8> %b to <vscale x 2 x i32>
  %d2 = zext <vscale x 2 x i8> %b2 to <vscale x 2 x i32>
  %e = mul <vscale x 2 x i32> %c, %d
  %f = add <vscale x 2 x i32> %c, %d2
  %g = sub <vscale x 2 x i32> %c, %d2
  %h = or <vscale x 2 x i32> %e, %f
  %i = or <vscale x 2 x i32> %h, %g
  ret <vscale x 2 x i32> %i
}