; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=ilp32d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+zvfh,+v,+m -target-abi=lp64d \
; RUN:   -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64

declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32)

define <2 x i8> @vp_ctpop_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

define <2 x i8> @vp_ctpop_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v2i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i8> %v
}

declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32)

define <4 x i8> @vp_ctpop_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}

define <4 x i8> @vp_ctpop_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v4i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i8> %v
}
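; Note: with no element-wise vector popcount instruction available here, the
; VP ctpop is expanded into the classic SWAR bit-twiddling sequence:
;   x = x - ((x >> 1) & 0x55...);
;   x = (x & 0x33...) + ((x >> 2) & 0x33...);
;   x = (x + (x >> 4)) & 0x0f...;
; For e8 the masks are the immediates 85 (0x55), 51 (0x33) and 15 (0x0f), and
; no final multiply step is needed: an i8 popcount already fits in a nibble.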
declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32)

define <8 x i8> @vp_ctpop_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

define <8 x i8> @vp_ctpop_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v8i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i8> %v
}

declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32)

define <16 x i8> @vp_ctpop_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    vand.vi v8, v8, 15, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

define <16 x i8> @vp_ctpop_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v16i8_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    li a0, 85
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    li a0, 51
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    vand.vi v8, v8, 15
; CHECK-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i8> %v
}

declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32)

define <2 x i16> @vp_ctpop_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

define <2 x i16> @vp_ctpop_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v2i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i16> %v
}

declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32)

define <4 x i16> @vp_ctpop_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

define <4 x i16> @vp_ctpop_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v4i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i16> %v
}

declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32)

define <8 x i16> @vp_ctpop_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

define <8 x i16> @vp_ctpop_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v8i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i16> %v
}

declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32)

define <16 x i16> @vp_ctpop_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 8, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

define <16 x i16> @vp_ctpop_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v16i16_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    lui a0, 5
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    lui a0, 3
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v10, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    lui a0, 1
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    li a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 8
; CHECK-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i16> %v
}

declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32)

define <2 x i32> @vp_ctpop_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 24, v0.t
; CHECK-NEXT:    ret
  %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

define <2 x i32> @vp_ctpop_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v2i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 24
; CHECK-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i32> %v
}

declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32)

define <4 x i32> @vp_ctpop_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v9, v8, v0.t
; CHECK-NEXT:    vsrl.vi v9, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 24, v0.t
; CHECK-NEXT:    ret
  %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

define <4 x i32> @vp_ctpop_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v4i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT:    vsrl.vi v9, v8, 1
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v9, v9, a0
; CHECK-NEXT:    vsub.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v9, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v9, v8
; CHECK-NEXT:    vsrl.vi v9, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v9
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 24
; CHECK-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i32> %v
}

declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32)

define <8 x i32> @vp_ctpop_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v10, v10, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v10, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v10, v8, v0.t
; CHECK-NEXT:    vsrl.vi v10, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 24, v0.t
; CHECK-NEXT:    ret
  %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}

define <8 x i32> @vp_ctpop_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v8i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT:    vsrl.vi v10, v8, 1
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v10, v10, a0
; CHECK-NEXT:    vsub.vv v8, v8, v10
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v10, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v10, v8
; CHECK-NEXT:    vsrl.vi v10, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v10
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 24
; CHECK-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i32> %v
}
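; Note: e16 and e32 use the same expansion with lui/addi-built masks
; (0x5555/0x3333/0x0f0f and 0x55555555/0x33333333/0x0f0f0f0f) plus a final
; multiply by 0x0101 (li 257) resp. 0x01010101 (lui 4112 + 257) that sums
; the per-byte counts into the top byte; vsrl.vi by 8 resp. 24 then shifts
; the total back down into the low byte.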
declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32)

define <16 x i32> @vp_ctpop_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 1, v0.t
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v12, v12, a0, v0.t
; CHECK-NEXT:    vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v12, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 2, v0.t
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vadd.vv v8, v12, v8, v0.t
; CHECK-NEXT:    vsrl.vi v12, v8, 4, v0.t
; CHECK-NEXT:    vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0, v0.t
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT:    vsrl.vi v8, v8, 24, v0.t
; CHECK-NEXT:    ret
  %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

define <16 x i32> @vp_ctpop_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
; CHECK-LABEL: vp_ctpop_v16i32_unmasked:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT:    vsrl.vi v12, v8, 1
; CHECK-NEXT:    lui a0, 349525
; CHECK-NEXT:    addi a0, a0, 1365
; CHECK-NEXT:    vand.vx v12, v12, a0
; CHECK-NEXT:    vsub.vv v8, v8, v12
; CHECK-NEXT:    lui a0, 209715
; CHECK-NEXT:    addi a0, a0, 819
; CHECK-NEXT:    vand.vx v12, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 2
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    vadd.vv v8, v12, v8
; CHECK-NEXT:    vsrl.vi v12, v8, 4
; CHECK-NEXT:    vadd.vv v8, v8, v12
; CHECK-NEXT:    lui a0, 61681
; CHECK-NEXT:    addi a0, a0, -241
; CHECK-NEXT:    vand.vx v8, v8, a0
; CHECK-NEXT:    lui a0, 4112
; CHECK-NEXT:    addi a0, a0, 257
; CHECK-NEXT:    vmul.vx v8, v8, a0
; CHECK-NEXT:    vsrl.vi v8, v8, 24
; CHECK-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i32> %v
}

declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32)

define <2 x i64> @vp_ctpop_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v2i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vi v9, v8, 1, v0.t
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v9, v9, v10, v0.t
; RV32-NEXT:    vsub.vv v8, v8, v9, v0.t
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v10, v8, v9, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; RV32-NEXT:    vadd.vv v8, v10, v8, v0.t
; RV32-NEXT:    vsrl.vi v9, v8, 4, v0.t
; RV32-NEXT:    vadd.vv v8, v8, v9, v0.t
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v9, v0.t
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v9, v0.t
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v2i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT:    vsrl.vi v9, v8, 1, v0.t
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v9, v9, a0, v0.t
; RV64-NEXT:    vsub.vv v8, v8, v9, v0.t
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v9, v8, a0, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    vadd.vv v8, v9, v8, v0.t
; RV64-NEXT:    vsrl.vi v9, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v9, v0.t
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

define <2 x i64> @vp_ctpop_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v2i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vsrl.vi v9, v8, 1
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v9, v9, v10
; RV32-NEXT:    vsub.vv v8, v8, v9
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v10, v8, v9
; RV32-NEXT:    vsrl.vi v8, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v9
; RV32-NEXT:    vadd.vv v8, v10, v8
; RV32-NEXT:    vsrl.vi v9, v8, 4
; RV32-NEXT:    vadd.vv v8, v8, v9
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v9
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
; RV32-NEXT:    vmv.v.x v9, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v9
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v2i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT:    vsrl.vi v9, v8, 1
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v9, v9, a0
; RV64-NEXT:    vsub.vv v8, v8, v9
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v9, v8, a0
; RV64-NEXT:    vsrl.vi v8, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vadd.vv v8, v9, v8
; RV64-NEXT:    vsrl.vi v9, v8, 4
; RV64-NEXT:    vadd.vv v8, v8, v9
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <2 x i1> poison, i1 true, i32 0
  %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
  %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
  ret <2 x i64> %v
}

declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32)

define <4 x i64> @vp_ctpop_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v4i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vsrl.vi v10, v8, 1, v0.t
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v10, v10, v12, v0.t
; RV32-NEXT:    vsub.vv v8, v8, v10, v0.t
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v12, v8, v10, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; RV32-NEXT:    vadd.vv v8, v12, v8, v0.t
; RV32-NEXT:    vsrl.vi v10, v8, 4, v0.t
; RV32-NEXT:    vadd.vv v8, v8, v10, v0.t
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v10, v0.t
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v10, v0.t
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v4i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT:    vsrl.vi v10, v8, 1, v0.t
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v10, v10, a0, v0.t
; RV64-NEXT:    vsub.vv v8, v8, v10, v0.t
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v10, v8, a0, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    vadd.vv v8, v10, v8, v0.t
; RV64-NEXT:    vsrl.vi v10, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v10, v0.t
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

define <4 x i64> @vp_ctpop_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v4i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vsrl.vi v10, v8, 1
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v10, v10, v12
; RV32-NEXT:    vsub.vv v8, v8, v10
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v12, v8, v10
; RV32-NEXT:    vsrl.vi v8, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    vadd.vv v8, v12, v8
; RV32-NEXT:    vsrl.vi v10, v8, 4
; RV32-NEXT:    vadd.vv v8, v8, v10
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v10
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    vsetvli a2, zero, e32, m2, ta, ma
; RV32-NEXT:    vmv.v.x v10, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v10
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v4i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT:    vsrl.vi v10, v8, 1
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v10, v10, a0
; RV64-NEXT:    vsub.vv v8, v8, v10
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v10, v8, a0
; RV64-NEXT:    vsrl.vi v8, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vadd.vv v8, v10, v8
; RV64-NEXT:    vsrl.vi v10, v8, 4
; RV64-NEXT:    vadd.vv v8, v8, v10
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <4 x i1> poison, i1 true, i32 0
  %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
  %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
  ret <4 x i64> %v
}

declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32)

define <8 x i64> @vp_ctpop_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v8i64:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vsrl.vi v12, v8, 1, v0.t
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v16, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v12, v12, v16, v0.t
; RV32-NEXT:    vsub.vv v8, v8, v12, v0.t
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v16, v8, v12, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; RV32-NEXT:    vadd.vv v8, v16, v8, v0.t
; RV32-NEXT:    vsrl.vi v12, v8, 4, v0.t
; RV32-NEXT:    vadd.vv v8, v8, v12, v0.t
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v12, v0.t
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v12, v0.t
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v8i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV64-NEXT:    vsrl.vi v12, v8, 1, v0.t
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v12, v12, a0, v0.t
; RV64-NEXT:    vsub.vv v8, v8, v12, v0.t
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v12, v8, a0, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    vadd.vv v8, v12, v8, v0.t
; RV64-NEXT:    vsrl.vi v12, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v12, v0.t
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

define <8 x i64> @vp_ctpop_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v8i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vsrl.vi v12, v8, 1
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v16, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v12, v12, v16
; RV32-NEXT:    vsub.vv v8, v8, v12
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v16, v8, v12
; RV32-NEXT:    vsrl.vi v8, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    vadd.vv v8, v16, v8
; RV32-NEXT:    vsrl.vi v12, v8, 4
; RV32-NEXT:    vadd.vv v8, v8, v12
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v12
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT:    vmv.v.x v12, a1
; RV32-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v12
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v8i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
; RV64-NEXT:    vsrl.vi v12, v8, 1
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v12, v12, a0
; RV64-NEXT:    vsub.vv v8, v8, v12
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v12, v8, a0
; RV64-NEXT:    vsrl.vi v8, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vadd.vv v8, v12, v8
; RV64-NEXT:    vsrl.vi v12, v8, 4
; RV64-NEXT:    vadd.vv v8, v8, v12
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <8 x i1> poison, i1 true, i32 0
  %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
  %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
  ret <8 x i64> %v
}

declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32)
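; Note: for e64 the masks no longer fit a 32-bit scalar immediate. RV64
; builds each 64-bit constant with lui/addiw/slli/add and extracts the
; count with vsrl.vx by 56; RV32 has no 64-bit scalar registers, so it
; splats the 32-bit halves instead (vmv.v.x at e32 above, or zero-strided
; vlse64.v reloads of constants stored to the stack in the LMUL=8 cases
; below).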
define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v15i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    .cfi_def_cfa_offset 32
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    sw a1, 28(sp)
; RV32-NEXT:    sw a1, 24(sp)
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    sw a1, 4(sp)
; RV32-NEXT:    sw a1, 0(sp)
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT:    addi a1, sp, 24
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
; RV32-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi a1, sp, 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v24, v8, v16, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    vadd.vv v8, v24, v8, v0.t
; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi a1, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    mv a1, sp
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v15i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
; RV64-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v8, a0, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    vadd.vv v8, v16, v8, v0.t
; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
  ret <15 x i64> %v
}

define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v15i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    .cfi_def_cfa_offset 32
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    sw a1, 28(sp)
; RV32-NEXT:    sw a1, 24(sp)
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    sw a1, 4(sp)
; RV32-NEXT:    sw a1, 0(sp)
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vi v16, v8, 1
; RV32-NEXT:    addi a1, sp, 24
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v16, v16, v24
; RV32-NEXT:    vsub.vv v8, v8, v16
; RV32-NEXT:    addi a1, sp, 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v24, v8, v16
; RV32-NEXT:    vsrl.vi v8, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    vadd.vv v8, v24, v8
; RV32-NEXT:    vsrl.vi v16, v8, 4
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    addi a1, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    mv a1, sp
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v16
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v15i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vi v16, v8, 1
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v16, a0
; RV64-NEXT:    vsub.vv v8, v8, v16
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v8, a0
; RV64-NEXT:    vsrl.vi v8, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vadd.vv v8, v16, v8
; RV64-NEXT:    vsrl.vi v16, v8, 4
; RV64-NEXT:    vadd.vv v8, v8, v16
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <15 x i1> poison, i1 true, i32 0
  %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
  %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
  ret <15 x i64> %v
}

declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)

define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v16i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    .cfi_def_cfa_offset 32
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    sw a1, 28(sp)
; RV32-NEXT:    sw a1, 24(sp)
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    sw a1, 4(sp)
; RV32-NEXT:    sw a1, 0(sp)
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT:    addi a1, sp, 24
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v16, v16, v24, v0.t
; RV32-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi a1, sp, 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v24, v8, v16, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    vadd.vv v8, v24, v8, v0.t
; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi a1, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    mv a1, sp
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v16i64:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v16, a0, v0.t
; RV64-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v8, a0, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    vadd.vv v8, v16, v8, v0.t
; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0, v0.t
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0, v0.t
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT:    ret
  %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v16i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -32
; RV32-NEXT:    .cfi_def_cfa_offset 32
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    sw a1, 28(sp)
; RV32-NEXT:    sw a1, 24(sp)
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    sw a1, 12(sp)
; RV32-NEXT:    sw a1, 8(sp)
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    sw a1, 4(sp)
; RV32-NEXT:    sw a1, 0(sp)
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vi v16, v8, 1
; RV32-NEXT:    addi a1, sp, 24
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v24, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v16, v16, v24
; RV32-NEXT:    vsub.vv v8, v8, v16
; RV32-NEXT:    addi a1, sp, 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v24, v8, v16
; RV32-NEXT:    vsrl.vi v8, v8, 2
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    vadd.vv v8, v24, v8
; RV32-NEXT:    vsrl.vi v16, v8, 4
; RV32-NEXT:    vadd.vv v8, v8, v16
; RV32-NEXT:    addi a1, sp, 8
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v8, v16
; RV32-NEXT:    mv a1, sp
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a1), zero
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v16
; RV32-NEXT:    li a0, 56
; RV32-NEXT:    vsrl.vx v8, v8, a0
; RV32-NEXT:    addi sp, sp, 32
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v16i64_unmasked:
; RV64:       # %bb.0:
; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vi v16, v8, 1
; RV64-NEXT:    lui a0, 349525
; RV64-NEXT:    addiw a0, a0, 1365
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v16, a0
; RV64-NEXT:    vsub.vv v8, v8, v16
; RV64-NEXT:    lui a0, 209715
; RV64-NEXT:    addiw a0, a0, 819
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v16, v8, a0
; RV64-NEXT:    vsrl.vi v8, v8, 2
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    vadd.vv v8, v16, v8
; RV64-NEXT:    vsrl.vi v16, v8, 4
; RV64-NEXT:    vadd.vv v8, v8, v16
; RV64-NEXT:    lui a0, 61681
; RV64-NEXT:    addiw a0, a0, -241
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vand.vx v8, v8, a0
; RV64-NEXT:    lui a0, 4112
; RV64-NEXT:    addiw a0, a0, 257
; RV64-NEXT:    slli a1, a0, 32
; RV64-NEXT:    add a0, a0, a1
; RV64-NEXT:    vmul.vx v8, v8, a0
; RV64-NEXT:    li a0, 56
; RV64-NEXT:    vsrl.vx v8, v8, a0
; RV64-NEXT:    ret
  %head = insertelement <16 x i1> poison, i1 true, i32 0
  %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
  %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
  ret <16 x i64> %v
}

declare <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64>, <32 x i1>, i32)

define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v32i64:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    .cfi_def_cfa_offset 48
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    li a2, 48
; RV32-NEXT:    mul a1, a1, a2
; RV32-NEXT:    sub sp, sp, a1
; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 48 * vlenb
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    li a2, 40
; RV32-NEXT:    mul a1, a1, a2
; RV32-NEXT:    add a1, sp, a1
; RV32-NEXT:    addi a1, a1, 48
; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT:    vslidedown.vi v1, v0, 2
; RV32-NEXT:    lui a1, 349525
; RV32-NEXT:    addi a1, a1, 1365
; RV32-NEXT:    sw a1, 44(sp)
; RV32-NEXT:    sw a1, 40(sp)
; RV32-NEXT:    lui a1, 209715
; RV32-NEXT:    addi a1, a1, 819
; RV32-NEXT:    sw a1, 36(sp)
; RV32-NEXT:    sw a1, 32(sp)
; RV32-NEXT:    lui a1, 61681
; RV32-NEXT:    addi a1, a1, -241
; RV32-NEXT:    sw a1, 28(sp)
; RV32-NEXT:    sw a1, 24(sp)
; RV32-NEXT:    lui a1, 4112
; RV32-NEXT:    addi a1, a1, 257
; RV32-NEXT:    sw a1, 20(sp)
; RV32-NEXT:    li a2, 16
; RV32-NEXT:    sw a1, 16(sp)
; RV32-NEXT:    mv a1, a0
; RV32-NEXT:    bltu a0, a2, .LBB34_2
; RV32-NEXT:  # %bb.1:
; RV32-NEXT:    li a1, 16
; RV32-NEXT:  .LBB34_2:
; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV32-NEXT:    addi a2, sp, 40
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    csrr a3, vlenb
; RV32-NEXT:    li a4, 24
; RV32-NEXT:    mul a3, a3, a4
; RV32-NEXT:    add a3, sp, a3
; RV32-NEXT:    addi a3, a3, 48
; RV32-NEXT:    vs8r.v v8, (a3) # Unknown-size Folded Spill
; RV32-NEXT:    vlse64.v v8, (a2), zero
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 5
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    li a3, 24
; RV32-NEXT:    mul a2, a2, a3
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT:    vsub.vv v24, v8, v16, v0.t
; RV32-NEXT:    addi a2, sp, 32
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v8, (a2), zero
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    li a3, 24
; RV32-NEXT:    mul a2, a2, a3
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v16, v24, v8, v0.t
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 4
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV32-NEXT:    vsrl.vi v16, v24, 2, v0.t
; RV32-NEXT:    vand.vv v16, v16, v8, v0.t
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 4
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vl8r.v v8, (a2) # Unknown-size Folded Reload
; RV32-NEXT:    vadd.vv v16, v8, v16, v0.t
; RV32-NEXT:    vsrl.vi v8, v16, 4, v0.t
; RV32-NEXT:    vadd.vv v16, v16, v8, v0.t
; RV32-NEXT:    addi a2, sp, 24
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v8, (a2), zero
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 4
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT:    vand.vv v8, v16, v8, v0.t
; RV32-NEXT:    addi a2, sp, 16
; RV32-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT:    vlse64.v v16, (a2), zero
; RV32-NEXT:    addi a2, sp, 48
; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
; RV32-NEXT:    li a1, 56
; RV32-NEXT:    vsrl.vx v8, v8, a1, v0.t
; RV32-NEXT:    csrr a2, vlenb
; RV32-NEXT:    slli a2, a2, 3
; RV32-NEXT:    add a2, sp, a2
; RV32-NEXT:    addi a2, a2, 48
; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT:    addi a2, a0, -16
; RV32-NEXT:    sltu a0, a0, a2
; RV32-NEXT:    addi a0, a0, -1
; RV32-NEXT:    and a0, a0, a2
; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT:    vmv1r.v v0, v1
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a2, 40
; RV32-NEXT:    mul a0, a0, a2
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 48
; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vsrl.vi v24, v16, 1, v0.t
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 5
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 48
; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vand.vv v8, v24, v8, v0.t
; RV32-NEXT:    vsub.vv v8, v16, v8, v0.t
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a2, 24
; RV32-NEXT:    mul a0, a0, a2
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 48
; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vand.vv v24, v8, v16, v0.t
; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    vadd.vv v8, v24, v8, v0.t
; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV32-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 4
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 48
; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
; RV32-NEXT:    addi a0, sp, 48
; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
; RV32-NEXT:    vsrl.vx v16, v8, a1, v0.t
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    slli a0, a0, 3
; RV32-NEXT:    add a0, sp, a0
; RV32-NEXT:    addi a0, a0, 48
; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT:    csrr a0, vlenb
; RV32-NEXT:    li a1, 48
; RV32-NEXT:    mul a0, a0, a1
; RV32-NEXT:    add sp, sp, a0
; RV32-NEXT:    addi sp, sp, 48
; RV32-NEXT:    ret
;
; RV64-LABEL: vp_ctpop_v32i64:
; RV64:       # %bb.0:
; RV64-NEXT:    addi sp, sp, -16
; RV64-NEXT:    .cfi_def_cfa_offset 16
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 4
; RV64-NEXT:    sub sp, sp, a1
; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
; RV64-NEXT:    csrr a1, vlenb
; RV64-NEXT:    slli a1, a1, 3
; RV64-NEXT:    add a1, sp, a1
; RV64-NEXT:    addi a1, a1, 16
; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT:    li a2, 16
; RV64-NEXT:    vslidedown.vi v24, v0, 2
; RV64-NEXT:    mv a1, a0
; RV64-NEXT:    bltu a0, a2, .LBB34_2
; RV64-NEXT:  # %bb.1:
; RV64-NEXT:    li a1, 16
; RV64-NEXT:  .LBB34_2:
; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT:    lui a1, 349525
; RV64-NEXT:    addiw a1, a1, 1365
; RV64-NEXT:    slli a2, a1, 32
; RV64-NEXT:    add a1, a1, a2
; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
; RV64-NEXT:    vsub.vv v8, v8, v16, v0.t
; RV64-NEXT:    lui a2, 209715
; RV64-NEXT:    addiw a2, a2, 819
; RV64-NEXT:    slli a3, a2, 32
; RV64-NEXT:    add a2, a2, a3
; RV64-NEXT:    vand.vx v16, v8, a2, v0.t
; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
; RV64-NEXT:    vand.vx v8, v8, a2, v0.t
; RV64-NEXT:    vadd.vv v8, v16, v8, v0.t
; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV64-NEXT:    lui a3, 61681
; RV64-NEXT:    addiw a3, a3, -241
; RV64-NEXT:    slli a4, a3, 32
; RV64-NEXT:    add a3, a3, a4
; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
; RV64-NEXT:    lui a4, 4112
; RV64-NEXT:    addiw a4, a4, 257
; RV64-NEXT:    slli a5, a4, 32
; RV64-NEXT:    add a4, a4, a5
; RV64-NEXT:    vmul.vx v8, v8, a4, v0.t
; RV64-NEXT:    li a5, 56
; RV64-NEXT:    vsrl.vx v8, v8, a5, v0.t
; RV64-NEXT:    addi a6, sp, 16
; RV64-NEXT:    vs8r.v v8, (a6) # Unknown-size Folded Spill
; RV64-NEXT:    addi a6, a0, -16
; RV64-NEXT:    sltu a0, a0, a6
; RV64-NEXT:    addi a0, a0, -1
; RV64-NEXT:    and a0, a0, a6
; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT:    vmv1r.v v0, v24
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 3
; RV64-NEXT:    add a0, sp, a0
; RV64-NEXT:    addi a0, a0, 16
; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
; RV64-NEXT:    vsub.vv v16, v8, v16, v0.t
; RV64-NEXT:    vand.vx v8, v16, a2, v0.t
; RV64-NEXT:    vsrl.vi v16, v16, 2, v0.t
; RV64-NEXT:    vand.vx v16, v16, a2, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
; RV64-NEXT:    vmul.vx v8, v8, a4, v0.t
; RV64-NEXT:    vsrl.vx v16, v8, a5, v0.t
; RV64-NEXT:    addi a0, sp, 16
; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV64-NEXT:    csrr a0, vlenb
; RV64-NEXT:    slli a0, a0, 4
; RV64-NEXT:    add sp, sp, a0
; RV64-NEXT:    addi sp, sp, 16
; RV64-NEXT:    ret
  %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
  ret <32 x i64> %v
}

define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v32i64_unmasked:
; RV32:       # %bb.0:
; RV32-NEXT:    addi sp, sp, -48
; RV32-NEXT:    .cfi_def_cfa_offset 48
; RV32-NEXT:    csrr a1, vlenb
; RV32-NEXT:    li a2, 40
define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
; RV32-LABEL: vp_ctpop_v32i64_unmasked:
; RV32: # %bb.0:
; RV32-NEXT: addi sp, sp, -48
; RV32-NEXT: .cfi_def_cfa_offset 48
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a2, 40
; RV32-NEXT: mul a1, a1, a2
; RV32-NEXT: sub sp, sp, a1
; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x30, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 48 + 40 * vlenb
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 5
; RV32-NEXT: add a1, sp, a1
; RV32-NEXT: addi a1, a1, 48
; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
; RV32-NEXT: lui a1, 349525
; RV32-NEXT: addi a1, a1, 1365
; RV32-NEXT: sw a1, 44(sp)
; RV32-NEXT: sw a1, 40(sp)
; RV32-NEXT: lui a1, 209715
; RV32-NEXT: addi a1, a1, 819
; RV32-NEXT: sw a1, 36(sp)
; RV32-NEXT: sw a1, 32(sp)
; RV32-NEXT: lui a1, 61681
; RV32-NEXT: addi a1, a1, -241
; RV32-NEXT: sw a1, 28(sp)
; RV32-NEXT: sw a1, 24(sp)
; RV32-NEXT: lui a1, 4112
; RV32-NEXT: addi a1, a1, 257
; RV32-NEXT: sw a1, 20(sp)
; RV32-NEXT: li a2, 16
; RV32-NEXT: sw a1, 16(sp)
; RV32-NEXT: mv a1, a0
; RV32-NEXT: bltu a0, a2, .LBB35_2
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB35_2:
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: addi a2, sp, 40
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v24, (a2), zero
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: li a3, 24
; RV32-NEXT: mul a2, a2, a3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsub.vv v8, v8, v16
; RV32-NEXT: addi a2, sp, 32
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v0, (a2), zero
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vand.vv v16, v8, v0
; RV32-NEXT: vsrl.vi v8, v8, 2
; RV32-NEXT: vand.vv v8, v8, v0
; RV32-NEXT: vadd.vv v8, v16, v8
; RV32-NEXT: vsrl.vi v16, v8, 4
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: addi a2, sp, 24
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a2), zero
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 4
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vand.vv v16, v8, v16
; RV32-NEXT: addi a2, sp, 16
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v8, (a2), zero
; RV32-NEXT: addi a2, sp, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmul.vv v16, v16, v8
; RV32-NEXT: li a1, 56
; RV32-NEXT: vsrl.vx v8, v16, a1
; RV32-NEXT: csrr a2, vlenb
; RV32-NEXT: slli a2, a2, 3
; RV32-NEXT: add a2, sp, a2
; RV32-NEXT: addi a2, a2, 48
; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
; RV32-NEXT: addi a2, a0, -16
; RV32-NEXT: sltu a0, a0, a2
; RV32-NEXT: addi a0, a0, -1
; RV32-NEXT: and a0, a0, a2
; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vsrl.vi v16, v8, 1
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a2, 24
; RV32-NEXT: mul a0, a0, a2
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v16, v16, v24
; RV32-NEXT: vsub.vv v16, v8, v16
; RV32-NEXT: vand.vv v8, v16, v0
; RV32-NEXT: vsrl.vi v16, v16, 2
; RV32-NEXT: vand.vv v16, v16, v0
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: vsrl.vi v16, v8, 4
; RV32-NEXT: vadd.vv v8, v8, v16
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vand.vv v8, v8, v16
; RV32-NEXT: addi a0, sp, 48
; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
; RV32-NEXT: vmul.vv v8, v8, v16
; RV32-NEXT: vsrl.vx v16, v8, a1
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: add a0, sp, a0
; RV32-NEXT: addi a0, a0, 48
; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: li a1, 40
; RV32-NEXT: mul a0, a0, a1
; RV32-NEXT: add sp, sp, a0
; RV32-NEXT: addi sp, sp, 48
; RV32-NEXT: ret
;
; RV64-LABEL: vp_ctpop_v32i64_unmasked:
; RV64: # %bb.0:
; RV64-NEXT: li a2, 16
; RV64-NEXT: mv a1, a0
; RV64-NEXT: bltu a0, a2, .LBB35_2
; RV64-NEXT: # %bb.1:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB35_2:
; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v24, v8, 1
; RV64-NEXT: lui a1, 349525
; RV64-NEXT: addiw a1, a1, 1365
; RV64-NEXT: slli a2, a1, 32
; RV64-NEXT: add a1, a1, a2
; RV64-NEXT: vand.vx v24, v24, a1
; RV64-NEXT: vsub.vv v8, v8, v24
; RV64-NEXT: lui a2, 209715
; RV64-NEXT: addiw a2, a2, 819
; RV64-NEXT: slli a3, a2, 32
; RV64-NEXT: add a2, a2, a3
; RV64-NEXT: vand.vx v24, v8, a2
; RV64-NEXT: vsrl.vi v8, v8, 2
; RV64-NEXT: vand.vx v8, v8, a2
; RV64-NEXT: vadd.vv v8, v24, v8
; RV64-NEXT: vsrl.vi v24, v8, 4
; RV64-NEXT: vadd.vv v8, v8, v24
; RV64-NEXT: lui a3, 61681
; RV64-NEXT: addiw a3, a3, -241
; RV64-NEXT: slli a4, a3, 32
; RV64-NEXT: add a3, a3, a4
; RV64-NEXT: vand.vx v8, v8, a3
; RV64-NEXT: lui a4, 4112
; RV64-NEXT: addiw a4, a4, 257
; RV64-NEXT: slli a5, a4, 32
; RV64-NEXT: add a4, a4, a5
; RV64-NEXT: vmul.vx v8, v8, a4
; RV64-NEXT: li a5, 56
; RV64-NEXT: vsrl.vx v8, v8, a5
; RV64-NEXT: addi a6, a0, -16
; RV64-NEXT: sltu a0, a0, a6
; RV64-NEXT: addi a0, a0, -1
; RV64-NEXT: and a0, a0, a6
; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vsrl.vi v24, v16, 1
; RV64-NEXT: vand.vx v24, v24, a1
; RV64-NEXT: vsub.vv v16, v16, v24
; RV64-NEXT: vand.vx v24, v16, a2
; RV64-NEXT: vsrl.vi v16, v16, 2
; RV64-NEXT: vand.vx v16, v16, a2
; RV64-NEXT: vadd.vv v16, v24, v16
; RV64-NEXT: vsrl.vi v24, v16, 4
; RV64-NEXT: vadd.vv v16, v16, v24
; RV64-NEXT: vand.vx v16, v16, a3
; RV64-NEXT: vmul.vx v16, v16, a4
; RV64-NEXT: vsrl.vx v16, v16, a5
; RV64-NEXT: ret
  %head = insertelement <32 x i1> poison, i1 true, i32 0
  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
  %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
  ret <32 x i64> %v
}
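; Reader's note (an annotation, not autogenerated): the magic constants above
; are the standard SWAR popcount masks. The scalar i64 equivalent is:
;   x = x - ((x >> 1) & 0x5555555555555555)
;   x = (x & 0x3333333333333333) + ((x >> 2) & 0x3333333333333333)
;   x = (x + (x >> 4)) & 0x0f0f0f0f0f0f0f0f
;   popcount(x) = (x * 0x0101010101010101) >> 56
; RV64 materializes each mask inline (lui/addiw for the low 32 bits, then
; slli/add to replicate them into the high half), while RV32, lacking 64-bit
; scalar registers, stores the 32-bit pattern to two adjacent stack words and
; broadcasts the 64-bit value into a vector with a stride-zero vlse64.v.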