; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -passes="function(loop(indvars,loop-idiom,loop-deletion),simplifycfg)" -S < %s | FileCheck %s
; Compile command:
; $ clang -m64 -fno-discard-value-names -O0 -S -emit-llvm -Xclang -disable-O0-optnone Code.c
; $ bin/opt -S -passes=mem2reg,loop-simplify,lcssa,loop-rotate \
;   -passes=licm,simple-loop-unswitch -enable-nontrivial-unswitch -passes=loop-simplify \
;   -passes=loop-deletion,simplifycfg,indvars Code.ll > CodeOpt.ll
target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
;
; NOTE(review): the reference-C comments in this chunk were garbled during
; extraction (text between '<' and '>' appears stripped, which also removed
; the @PositiveFor64 / @NestedFor64 IR). The C snippets below are restated
; from the surviving IR — confirm against the upstream memset-runtime test.
;
; Reference C for @NegativeFor64 — a row-wise memset driven by a *descending*
; induction variable (i counts down from n-1 to 0):
;
; void NegativeFor64(int *ar, long long n, long long m)
; {
;   long long i;
;   for (i = n - 1; i >= 0; --i) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
;
; The CHECK lines verify that loop-idiom + loop-deletion promote the per-row
; memset (m*4 bytes at ar + i*m) into a single memset of n*m*4 bytes starting
; at %ar, deleting the loop entirely; the original loop guard (n-1 >= 0)
; still gates the call.
define dso_local void @NegativeFor64(ptr %ar, i64 %n, i64 %m) {
; CHECK-LABEL: @NegativeFor64(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i64 [[N:%.*]], 1
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sge i64 [[SUB]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[MUL1:%.*]] = mul i64 [[M:%.*]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = mul i64 [[M]], [[N]]
; CHECK-NEXT:    [[TMP1:%.*]] = shl i64 [[TMP0]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP1]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %sub = sub nsw i64 %n, 1
  %cmp1 = icmp sge i64 %sub, 0
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %mul1 = mul i64 %m, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  ; i.02 walks n-1, n-2, ..., 0 — the negative-stride idiom under test.
  %i.02 = phi i64 [ %sub, %for.body.lr.ph ], [ %dec, %for.body ]
  %mul = mul nsw i64 %i.02, %m
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul1, i1 false)
  %dec = add nsw i64 %i.02, -1
  %cmp = icmp sge i64 %dec, 0
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; NOTE(review): a NestedFor64 reference snippet and its IR were lost to the
; same extraction damage; only its signature survives:
;   void NestedFor64(int *ar, long long n, long long m, long long o)
;
; Reference C for @Negative32 — same descending row-wise memset idiom, but
; with 32-bit n/m that are sign-extended to 64-bit inside the function:
;
; void Negative32(int *ar, int n, int m)
; {
;   long long i;
;   for (i = n - 1; i >= 0; i--) {
;     int *arr = ar + i * m;
;     memset(arr, 0, m * sizeof(int));
;   }
; }
;
; The CHECK lines verify promotion to one memset of sext(n)*sext(m)*4 bytes,
; i.e. loop-idiom must reason through the sext of the 32-bit bounds.
define void @Negative32(ptr %ar, i32 %n, i32 %m) {
; CHECK-LABEL: @Negative32(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[SUB:%.*]] = sub nsw i32 [[N:%.*]], 1
; CHECK-NEXT:    [[CONV:%.*]] = sext i32 [[SUB]] to i64
; CHECK-NEXT:    [[CMP1:%.*]] = icmp sge i64 [[CONV]], 0
; CHECK-NEXT:    br i1 [[CMP1]], label [[FOR_BODY_LR_PH:%.*]], label [[FOR_END:%.*]]
; CHECK:       for.body.lr.ph:
; CHECK-NEXT:    [[CONV1:%.*]] = sext i32 [[M:%.*]] to i64
; CHECK-NEXT:    [[CONV2:%.*]] = sext i32 [[M]] to i64
; CHECK-NEXT:    [[MUL3:%.*]] = mul i64 [[CONV2]], 4
; CHECK-NEXT:    [[TMP0:%.*]] = sext i32 [[N]] to i64
; CHECK-NEXT:    [[TMP1:%.*]] = mul i64 [[CONV1]], [[TMP0]]
; CHECK-NEXT:    [[TMP2:%.*]] = shl i64 [[TMP1]], 2
; CHECK-NEXT:    call void @llvm.memset.p0.i64(ptr align 4 [[AR:%.*]], i8 0, i64 [[TMP2]], i1 false)
; CHECK-NEXT:    br label [[FOR_END]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  %sub = sub nsw i32 %n, 1
  %conv = sext i32 %sub to i64
  %cmp1 = icmp sge i64 %conv, 0
  br i1 %cmp1, label %for.body.lr.ph, label %for.end

for.body.lr.ph:                                   ; preds = %entry
  %conv1 = sext i32 %m to i64
  %conv2 = sext i32 %m to i64
  %mul3 = mul i64 %conv2, 4
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  ; 64-bit descending IV seeded from sext(n-1).
  %i.02 = phi i64 [ %conv, %for.body.lr.ph ], [ %dec, %for.body ]
  %mul = mul nsw i64 %i.02, %conv1
  %add.ptr = getelementptr inbounds i32, ptr %ar, i64 %mul
  call void @llvm.memset.p0.i64(ptr align 4 %add.ptr, i8 0, i64 %mul3, i1 false)
  %dec = add nsw i64 %i.02, -1
  %cmp = icmp sge i64 %dec, 0
  br i1 %cmp, label %for.body, label %for.end

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; This case requires SCEVFolder in LoopIdiomRecognize.cpp to fold SCEV prior to comparison.
; For the inner loop, SCEVFolder is not needed; however, the promoted memset size would be based
; on the trip count of the inner loop (which is an unsigned integer).
; Then in the outer loop, the pointer stride SCEV for the memset needs to be converted based on the
; loop guard for it to equal the memset size SCEV. The loop guard guarantees that m >= 0
; inside the loop, so m can be converted from sext to zext, making the two SCEVs equal.
; void NestedFor32(int *ar, int n, int m, int o)
; {
;   int i, j;
;   for (i=0; i