; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 4
; RUN: opt %s -S -riscv-codegenprepare -mtriple=riscv64 -mattr=+v | FileCheck %s

declare i64 @llvm.vscale.i64()
declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>)

; Test that riscv-codegenprepare sinks the scalar fadd-reduction accumulator
; into the vector domain: the float phi is rewritten as a
; <vscale x 4 x float> phi (scalar kept in lane 0 via insertelement /
; extractelement) so the loop-carried value stays in a vector register.
define float @reduce_fadd(ptr %f) {
; CHECK-LABEL: define float @reduce_fadd(
; CHECK-SAME: ptr [[F:%.*]]) #[[ATTR2:[0-9]+]] {
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[VSCALE:%.*]] = tail call i64 @llvm.vscale.i64()
; CHECK-NEXT:    [[VECSIZE:%.*]] = shl nuw nsw i64 [[VSCALE]], 2
; CHECK-NEXT:    br label [[VECTOR_BODY:%.*]]
; CHECK:       vector.body:
; CHECK-NEXT:    [[INDEX:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDEX_NEXT:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[TMP0:%.*]] = phi <vscale x 4 x float> [ insertelement (<vscale x 4 x float> poison, float 0.000000e+00, i64 0), [[ENTRY]] ], [ [[TMP2:%.*]], [[VECTOR_BODY]] ]
; CHECK-NEXT:    [[GEP:%.*]] = getelementptr inbounds float, ptr [[F]], i64 [[INDEX]]
; CHECK-NEXT:    [[WIDE_LOAD:%.*]] = load <vscale x 4 x float>, ptr [[GEP]], align 4
; CHECK-NEXT:    [[TMP1:%.*]] = extractelement <vscale x 4 x float> [[TMP0]], i64 0
; CHECK-NEXT:    [[ACC:%.*]] = tail call float @llvm.vector.reduce.fadd.nxv4f32(float [[TMP1]], <vscale x 4 x float> [[WIDE_LOAD]])
; CHECK-NEXT:    [[INDEX_NEXT]] = add nuw i64 [[INDEX]], [[VECSIZE]]
; CHECK-NEXT:    [[DONE:%.*]] = icmp eq i64 [[INDEX_NEXT]], 1024
; CHECK-NEXT:    [[TMP2]] = insertelement <vscale x 4 x float> poison, float [[ACC]], i64 0
; CHECK-NEXT:    br i1 [[DONE]], label [[EXIT:%.*]], label [[VECTOR_BODY]]
; CHECK:       exit:
; CHECK-NEXT:    ret float [[ACC]]
;
entry:
  %vscale = tail call i64 @llvm.vscale.i64()
  ; Step = vscale * 4 == number of float lanes per <vscale x 4 x float>.
  %vecsize = shl nuw nsw i64 %vscale, 2
  br label %vector.body

vector.body:
  %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
  ; Scalar loop-carried accumulator the pass is expected to vectorize.
  %vec.phi = phi float [ 0.000000e+00, %entry ], [ %acc, %vector.body ]
  %gep = getelementptr inbounds float, ptr %f, i64 %index
  %wide.load = load <vscale x 4 x float>, ptr %gep, align 4
  %acc = tail call float @llvm.vector.reduce.fadd.nxv4f32(float %vec.phi, <vscale x 4 x float> %wide.load)
  %index.next = add nuw i64 %index, %vecsize
  %done = icmp eq i64 %index.next, 1024
  br i1 %done, label %exit, label %vector.body

exit:
  ret float %acc
}