1 //===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the implementation of the scalar evolution analysis
11 // engine, which is used primarily to analyze expressions involving induction
12 // variables in loops.
13 //
14 // There are several aspects to this library. First is the representation of
15 // scalar expressions, which are represented as subclasses of the SCEV class.
16 // These classes are used to represent certain types of subexpressions that we
17 // can handle. We only create one SCEV of a particular shape, so
18 // pointer-comparisons for equality are legal.
19 //
20 // One important aspect of the SCEV objects is that they are never cyclic, even
21 // if there is a cycle in the dataflow for an expression (i.e., a PHI node). If
22 // the PHI node is one of the idioms that we can represent (e.g., a polynomial
23 // recurrence) then we represent it directly as a recurrence node, otherwise we
24 // represent it as a SCEVUnknown node.
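// For example, the canonical induction variable of a loop that starts at 0
// and steps by 1 on every iteration is represented by the recurrence {0,+,1},
// printed together with the header of its loop, e.g. {0,+,1}<%loop.header>.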
25 //
26 // In addition to being able to represent expressions of various types, we also
27 // have folders that are used to build the *canonical* representation for a
28 // particular expression. These folders are capable of using a variety of
29 // rewrite rules to simplify the expressions.
30 //
31 // Once the folders are defined, we can implement the more interesting
32 // higher-level code, such as the code that recognizes PHI nodes of various
33 // types, computes the execution count of a loop, etc.
34 //
35 // TODO: We should use these routines and value representations to implement
36 // dependence analysis!
37 //
38 //===----------------------------------------------------------------------===//
39 //
40 // There are several good references for the techniques used in this analysis.
41 //
42 // Chains of recurrences -- a method to expedite the evaluation
43 // of closed-form functions
44 // Olaf Bachmann, Paul S. Wang, Eugene V. Zima
45 //
46 // On computational properties of chains of recurrences
47 // Eugene V. Zima
48 //
49 // Symbolic Evaluation of Chains of Recurrences for Loop Optimization
50 // Robert A. van Engelen
51 //
52 // Efficient Symbolic Analysis for Optimizing Compilers
53 // Robert A. van Engelen
54 //
55 // Using the chains of recurrences algebra for data dependence testing and
56 // induction variable substitution
57 // MS Thesis, Johnie Birch
58 //
59 //===----------------------------------------------------------------------===//
60
61 #define DEBUG_TYPE "scalar-evolution"
62 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
63 #include "llvm/Constants.h"
64 #include "llvm/DerivedTypes.h"
65 #include "llvm/GlobalVariable.h"
66 #include "llvm/GlobalAlias.h"
67 #include "llvm/Instructions.h"
68 #include "llvm/LLVMContext.h"
69 #include "llvm/Operator.h"
70 #include "llvm/Analysis/ConstantFolding.h"
71 #include "llvm/Analysis/Dominators.h"
72 #include "llvm/Analysis/InstructionSimplify.h"
73 #include "llvm/Analysis/LoopInfo.h"
74 #include "llvm/Analysis/ValueTracking.h"
75 #include "llvm/Assembly/Writer.h"
76 #include "llvm/Target/TargetData.h"
77 #include "llvm/Support/CommandLine.h"
78 #include "llvm/Support/ConstantRange.h"
79 #include "llvm/Support/Debug.h"
80 #include "llvm/Support/ErrorHandling.h"
81 #include "llvm/Support/GetElementPtrTypeIterator.h"
82 #include "llvm/Support/InstIterator.h"
83 #include "llvm/Support/MathExtras.h"
84 #include "llvm/Support/raw_ostream.h"
85 #include "llvm/ADT/Statistic.h"
86 #include "llvm/ADT/STLExtras.h"
87 #include "llvm/ADT/SmallPtrSet.h"
88 #include <algorithm>
89 using namespace llvm;
90
91 STATISTIC(NumArrayLenItCounts,
92 "Number of trip counts computed with array length");
93 STATISTIC(NumTripCountsComputed,
94 "Number of loops with predictable loop counts");
95 STATISTIC(NumTripCountsNotComputed,
96 "Number of loops without predictable loop counts");
97 STATISTIC(NumBruteForceTripCountsComputed,
98 "Number of loops with trip counts computed by force");
99
100 static cl::opt<unsigned>
101 MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
102 cl::desc("Maximum number of iterations SCEV will "
103 "symbolically execute a constant "
104 "derived loop"),
105 cl::init(100));
106
107 INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
108 "Scalar Evolution Analysis", false, true)
109 INITIALIZE_PASS_DEPENDENCY(LoopInfo)
110 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
111 INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
112 "Scalar Evolution Analysis", false, true)
113 char ScalarEvolution::ID = 0;
114
115 //===----------------------------------------------------------------------===//
116 // SCEV class definitions
117 //===----------------------------------------------------------------------===//
118
119 //===----------------------------------------------------------------------===//
120 // Implementation of the SCEV class.
121 //
122
123 void SCEV::dump() const {
124 print(dbgs());
125 dbgs() << '\n';
126 }
127
128 void SCEV::print(raw_ostream &OS) const {
129 switch (getSCEVType()) {
130 case scConstant:
131 WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
132 return;
133 case scTruncate: {
134 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
135 const SCEV *Op = Trunc->getOperand();
136 OS << "(trunc " << *Op->getType() << " " << *Op << " to "
137 << *Trunc->getType() << ")";
138 return;
139 }
140 case scZeroExtend: {
141 const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
142 const SCEV *Op = ZExt->getOperand();
143 OS << "(zext " << *Op->getType() << " " << *Op << " to "
144 << *ZExt->getType() << ")";
145 return;
146 }
147 case scSignExtend: {
148 const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
149 const SCEV *Op = SExt->getOperand();
150 OS << "(sext " << *Op->getType() << " " << *Op << " to "
151 << *SExt->getType() << ")";
152 return;
153 }
154 case scAddRecExpr: {
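    // Addrecs print in the {start,+,step,...} form, followed by any no-wrap
    // flags and the header block of their loop, e.g. {0,+,4}<nuw><%for.body>.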
155 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
156 OS << "{" << *AR->getOperand(0);
157 for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
158 OS << ",+," << *AR->getOperand(i);
159 OS << "}<";
160 if (AR->getNoWrapFlags(FlagNUW))
161 OS << "nuw><";
162 if (AR->getNoWrapFlags(FlagNSW))
163 OS << "nsw><";
164 if (AR->getNoWrapFlags(FlagNW) &&
165 !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
166 OS << "nw><";
167 WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
168 OS << ">";
169 return;
170 }
171 case scAddExpr:
172 case scMulExpr:
173 case scUMaxExpr:
174 case scSMaxExpr: {
175 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
176 const char *OpStr = 0;
177 switch (NAry->getSCEVType()) {
178 case scAddExpr: OpStr = " + "; break;
179 case scMulExpr: OpStr = " * "; break;
180 case scUMaxExpr: OpStr = " umax "; break;
181 case scSMaxExpr: OpStr = " smax "; break;
182 }
183 OS << "(";
184 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
185 I != E; ++I) {
186 OS << **I;
187 if (llvm::next(I) != E)
188 OS << OpStr;
189 }
190 OS << ")";
191 return;
192 }
193 case scUDivExpr: {
194 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
195 OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
196 return;
197 }
198 case scUnknown: {
199 const SCEVUnknown *U = cast<SCEVUnknown>(this);
200 Type *AllocTy;
201 if (U->isSizeOf(AllocTy)) {
202 OS << "sizeof(" << *AllocTy << ")";
203 return;
204 }
205 if (U->isAlignOf(AllocTy)) {
206 OS << "alignof(" << *AllocTy << ")";
207 return;
208 }
209
210 Type *CTy;
211 Constant *FieldNo;
212 if (U->isOffsetOf(CTy, FieldNo)) {
213 OS << "offsetof(" << *CTy << ", ";
214 WriteAsOperand(OS, FieldNo, false);
215 OS << ")";
216 return;
217 }
218
219 // Otherwise just print it normally.
220 WriteAsOperand(OS, U->getValue(), false);
221 return;
222 }
223 case scCouldNotCompute:
224 OS << "***COULDNOTCOMPUTE***";
225 return;
226 default: break;
227 }
228 llvm_unreachable("Unknown SCEV kind!");
229 }
230
231 Type *SCEV::getType() const {
232 switch (getSCEVType()) {
233 case scConstant:
234 return cast<SCEVConstant>(this)->getType();
235 case scTruncate:
236 case scZeroExtend:
237 case scSignExtend:
238 return cast<SCEVCastExpr>(this)->getType();
239 case scAddRecExpr:
240 case scMulExpr:
241 case scUMaxExpr:
242 case scSMaxExpr:
243 return cast<SCEVNAryExpr>(this)->getType();
244 case scAddExpr:
245 return cast<SCEVAddExpr>(this)->getType();
246 case scUDivExpr:
247 return cast<SCEVUDivExpr>(this)->getType();
248 case scUnknown:
249 return cast<SCEVUnknown>(this)->getType();
250 case scCouldNotCompute:
251 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
252 return 0;
253 default: break;
254 }
255 llvm_unreachable("Unknown SCEV kind!");
256 return 0;
257 }
258
259 bool SCEV::isZero() const {
260 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
261 return SC->getValue()->isZero();
262 return false;
263 }
264
265 bool SCEV::isOne() const {
266 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
267 return SC->getValue()->isOne();
268 return false;
269 }
270
271 bool SCEV::isAllOnesValue() const {
272 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
273 return SC->getValue()->isAllOnesValue();
274 return false;
275 }
276
277 SCEVCouldNotCompute::SCEVCouldNotCompute() :
278 SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
279
280 bool SCEVCouldNotCompute::classof(const SCEV *S) {
281 return S->getSCEVType() == scCouldNotCompute;
282 }
283
284 const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
285 FoldingSetNodeID ID;
286 ID.AddInteger(scConstant);
287 ID.AddPointer(V);
288 void *IP = 0;
289 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
290 SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
291 UniqueSCEVs.InsertNode(S, IP);
292 return S;
293 }
294
295 const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
296 return getConstant(ConstantInt::get(getContext(), Val));
297 }
298
299 const SCEV *
300 ScalarEvolution::getConstant(Type *Ty, uint64_t V, bool isSigned) {
301 IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
302 return getConstant(ConstantInt::get(ITy, V, isSigned));
303 }
304
305 SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
306 unsigned SCEVTy, const SCEV *op, Type *ty)
307 : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
308
309 SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
310 const SCEV *op, Type *ty)
311 : SCEVCastExpr(ID, scTruncate, op, ty) {
312 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
313 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
314 "Cannot truncate non-integer value!");
315 }
316
317 SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
318 const SCEV *op, Type *ty)
319 : SCEVCastExpr(ID, scZeroExtend, op, ty) {
320 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
321 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
322 "Cannot zero extend non-integer value!");
323 }
324
325 SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
326 const SCEV *op, Type *ty)
327 : SCEVCastExpr(ID, scSignExtend, op, ty) {
328 assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
329 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
330 "Cannot sign extend non-integer value!");
331 }
332
333 void SCEVUnknown::deleted() {
334 // Clear this SCEVUnknown from various maps.
335 SE->forgetMemoizedResults(this);
336
337 // Remove this SCEVUnknown from the uniquing map.
338 SE->UniqueSCEVs.RemoveNode(this);
339
340 // Release the value.
341 setValPtr(0);
342 }
343
344 void SCEVUnknown::allUsesReplacedWith(Value *New) {
345 // Clear this SCEVUnknown from various maps.
346 SE->forgetMemoizedResults(this);
347
348 // Remove this SCEVUnknown from the uniquing map.
349 SE->UniqueSCEVs.RemoveNode(this);
350
351 // Update this SCEVUnknown to point to the new value. This is needed
352 // because there may still be outstanding SCEVs which still point to
353 // this SCEVUnknown.
354 setValPtr(New);
355 }
356
357 bool SCEVUnknown::isSizeOf(Type *&AllocTy) const {
358 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
359 if (VCE->getOpcode() == Instruction::PtrToInt)
360 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
361 if (CE->getOpcode() == Instruction::GetElementPtr &&
362 CE->getOperand(0)->isNullValue() &&
363 CE->getNumOperands() == 2)
364 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
365 if (CI->isOne()) {
366 AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
367 ->getElementType();
368 return true;
369 }
370
371 return false;
372 }
373
374 bool SCEVUnknown::isAlignOf(Type *&AllocTy) const {
375 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
376 if (VCE->getOpcode() == Instruction::PtrToInt)
377 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
378 if (CE->getOpcode() == Instruction::GetElementPtr &&
379 CE->getOperand(0)->isNullValue()) {
380 Type *Ty =
381 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
382 if (StructType *STy = dyn_cast<StructType>(Ty))
383 if (!STy->isPacked() &&
384 CE->getNumOperands() == 3 &&
385 CE->getOperand(1)->isNullValue()) {
386 if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
387 if (CI->isOne() &&
388 STy->getNumElements() == 2 &&
389 STy->getElementType(0)->isIntegerTy(1)) {
390 AllocTy = STy->getElementType(1);
391 return true;
392 }
393 }
394 }
395
396 return false;
397 }
398
399 bool SCEVUnknown::isOffsetOf(Type *&CTy, Constant *&FieldNo) const {
400 if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
401 if (VCE->getOpcode() == Instruction::PtrToInt)
402 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
403 if (CE->getOpcode() == Instruction::GetElementPtr &&
404 CE->getNumOperands() == 3 &&
405 CE->getOperand(0)->isNullValue() &&
406 CE->getOperand(1)->isNullValue()) {
407 Type *Ty =
408 cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
409 // Ignore vector types here so that ScalarEvolutionExpander doesn't
410 // emit getelementptrs that index into vectors.
411 if (Ty->isStructTy() || Ty->isArrayTy()) {
412 CTy = Ty;
413 FieldNo = CE->getOperand(2);
414 return true;
415 }
416 }
417
418 return false;
419 }
420
421 //===----------------------------------------------------------------------===//
422 // SCEV Utilities
423 //===----------------------------------------------------------------------===//
424
425 namespace {
426 /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
427 /// than the complexity of the RHS. This comparator is used to canonicalize
428 /// expressions.
429 class SCEVComplexityCompare {
430 const LoopInfo *const LI;
431 public:
432 explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
433
434 // Return true if the complexity of LHS is less than that of RHS.
435 bool operator()(const SCEV *LHS, const SCEV *RHS) const {
436 return compare(LHS, RHS) < 0;
437 }
438
439 // Return negative, zero, or positive, if LHS is less than, equal to, or
440 // greater than RHS, respectively. A three-way result allows recursive
441 // comparisons to be more efficient.
442 int compare(const SCEV *LHS, const SCEV *RHS) const {
443 // Fast-path: SCEVs are uniqued so we can do a quick equality check.
444 if (LHS == RHS)
445 return 0;
446
447 // Primarily, sort the SCEVs by their getSCEVType().
448 unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
449 if (LType != RType)
450 return (int)LType - (int)RType;
451
452 // Aside from the getSCEVType() ordering, the particular ordering
453 // isn't very important except that it's beneficial to be consistent,
454 // so that (a + b) and (b + a) don't end up as different expressions.
455 switch (LType) {
456 case scUnknown: {
457 const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
458 const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
459
460 // Sort SCEVUnknown values with some loose heuristics. TODO: This is
461 // not as complete as it could be.
462 const Value *LV = LU->getValue(), *RV = RU->getValue();
463
464 // Order pointer values after integer values. This helps SCEVExpander
465 // form GEPs.
466 bool LIsPointer = LV->getType()->isPointerTy(),
467 RIsPointer = RV->getType()->isPointerTy();
468 if (LIsPointer != RIsPointer)
469 return (int)LIsPointer - (int)RIsPointer;
470
471 // Compare getValueID values.
472 unsigned LID = LV->getValueID(),
473 RID = RV->getValueID();
474 if (LID != RID)
475 return (int)LID - (int)RID;
476
477 // Sort arguments by their position.
478 if (const Argument *LA = dyn_cast<Argument>(LV)) {
479 const Argument *RA = cast<Argument>(RV);
480 unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
481 return (int)LArgNo - (int)RArgNo;
482 }
483
484 // For instructions, compare their loop depth, and their operand
485 // count. This is pretty loose.
486 if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
487 const Instruction *RInst = cast<Instruction>(RV);
488
489 // Compare loop depths.
490 const BasicBlock *LParent = LInst->getParent(),
491 *RParent = RInst->getParent();
492 if (LParent != RParent) {
493 unsigned LDepth = LI->getLoopDepth(LParent),
494 RDepth = LI->getLoopDepth(RParent);
495 if (LDepth != RDepth)
496 return (int)LDepth - (int)RDepth;
497 }
498
499 // Compare the number of operands.
500 unsigned LNumOps = LInst->getNumOperands(),
501 RNumOps = RInst->getNumOperands();
502 return (int)LNumOps - (int)RNumOps;
503 }
504
505 return 0;
506 }
507
508 case scConstant: {
509 const SCEVConstant *LC = cast<SCEVConstant>(LHS);
510 const SCEVConstant *RC = cast<SCEVConstant>(RHS);
511
512 // Compare constant values.
513 const APInt &LA = LC->getValue()->getValue();
514 const APInt &RA = RC->getValue()->getValue();
515 unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
516 if (LBitWidth != RBitWidth)
517 return (int)LBitWidth - (int)RBitWidth;
518 return LA.ult(RA) ? -1 : 1;
519 }
520
521 case scAddRecExpr: {
522 const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
523 const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
524
525 // Compare addrec loop depths.
526 const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
527 if (LLoop != RLoop) {
528 unsigned LDepth = LLoop->getLoopDepth(),
529 RDepth = RLoop->getLoopDepth();
530 if (LDepth != RDepth)
531 return (int)LDepth - (int)RDepth;
532 }
533
534 // Addrec complexity grows with operand count.
535 unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
536 if (LNumOps != RNumOps)
537 return (int)LNumOps - (int)RNumOps;
538
539 // Lexicographically compare.
540 for (unsigned i = 0; i != LNumOps; ++i) {
541 long X = compare(LA->getOperand(i), RA->getOperand(i));
542 if (X != 0)
543 return X;
544 }
545
546 return 0;
547 }
548
549 case scAddExpr:
550 case scMulExpr:
551 case scSMaxExpr:
552 case scUMaxExpr: {
553 const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
554 const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
555
556 // Lexicographically compare n-ary expressions.
557 unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
558 for (unsigned i = 0; i != LNumOps; ++i) {
559 if (i >= RNumOps)
560 return 1;
561 long X = compare(LC->getOperand(i), RC->getOperand(i));
562 if (X != 0)
563 return X;
564 }
565 return (int)LNumOps - (int)RNumOps;
566 }
567
568 case scUDivExpr: {
569 const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
570 const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
571
572 // Lexicographically compare udiv expressions.
573 long X = compare(LC->getLHS(), RC->getLHS());
574 if (X != 0)
575 return X;
576 return compare(LC->getRHS(), RC->getRHS());
577 }
578
579 case scTruncate:
580 case scZeroExtend:
581 case scSignExtend: {
582 const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
583 const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
584
585 // Compare cast expressions by operand.
586 return compare(LC->getOperand(), RC->getOperand());
587 }
588
589 default:
590 break;
591 }
592
593 llvm_unreachable("Unknown SCEV kind!");
594 return 0;
595 }
596 };
597 }
598
599 /// GroupByComplexity - Given a list of SCEV objects, order them by their
600 /// complexity, and group objects of the same complexity together by value.
601 /// When this routine is finished, we know that any duplicates in the vector are
602 /// consecutive and that complexity is monotonically increasing.
603 ///
604 /// Note that we take special precautions to ensure that we get deterministic
605 /// results from this routine. In other words, we don't want the results of
606 /// this to depend on where the addresses of various SCEV objects happened to
607 /// land in memory.
608 ///
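/// For example, given the operand list (%a + 2 + %b + %a), the constant sorts
/// to the front and the two occurrences of %a become adjacent, which makes it
/// easy for getAddExpr to merge them into a single (2 * %a) term.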
609 static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
610 LoopInfo *LI) {
611 if (Ops.size() < 2) return; // Noop
612 if (Ops.size() == 2) {
613 // This is the common case, which also happens to be trivially simple.
614 // Special case it.
615 const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
616 if (SCEVComplexityCompare(LI)(RHS, LHS))
617 std::swap(LHS, RHS);
618 return;
619 }
620
621 // Do the rough sort by complexity.
622 std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));
623
624 // Now that we are sorted by complexity, group elements of the same
625 // complexity. Note that this is, at worst, N^2, but the vector is likely to
626 // be extremely short in practice. Note that we take this approach because we
627 // do not want to depend on the addresses of the objects we are grouping.
628 for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
629 const SCEV *S = Ops[i];
630 unsigned Complexity = S->getSCEVType();
631
632 // If there are any objects of the same complexity and same value as this
633 // one, group them.
634 for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
635 if (Ops[j] == S) { // Found a duplicate.
636 // Move it to immediately after i'th element.
637 std::swap(Ops[i+1], Ops[j]);
638 ++i; // no need to rescan it.
639 if (i == e-2) return; // Done!
640 }
641 }
642 }
643 }
644
645
646
647 //===----------------------------------------------------------------------===//
648 // Simple SCEV method implementations
649 //===----------------------------------------------------------------------===//
650
651 /// BinomialCoefficient - Compute BC(It, K). The result has width W.
652 /// Assumes K > 0.
653 static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
654 ScalarEvolution &SE,
655 Type *ResultTy) {
656 // Handle the simplest case efficiently.
657 if (K == 1)
658 return SE.getTruncateOrZeroExtend(It, ResultTy);
659
660 // We are using the following formula for BC(It, K):
661 //
662 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
663 //
664 // Suppose, W is the bitwidth of the return value. We must be prepared for
665 // overflow. Hence, we must assure that the result of our computation is
666 // equal to the accurate one modulo 2^W. Unfortunately, division isn't
667 // safe in modular arithmetic.
668 //
669 // However, this code doesn't use exactly that formula; the formula it uses
670 // is something like the following, where T is the number of factors of 2 in
671 // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
672 // exponentiation:
673 //
674 // BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
675 //
676 // This formula is trivially equivalent to the previous formula. However,
677 // this formula can be implemented much more efficiently. The trick is that
678 // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
679 // arithmetic. To do exact division in modular arithmetic, all we have
680 // to do is multiply by the inverse. Therefore, this step can be done at
681 // width W.
682 //
683 // The next issue is how to safely do the division by 2^T. The way this
684 // is done is by doing the multiplication step at a width of at least W + T
685 // bits. This way, the bottom W+T bits of the product are accurate. Then,
686 // when we perform the division by 2^T (which is equivalent to a right shift
687 // by T), the bottom W bits are accurate. Extra bits are okay; they'll get
688 // truncated out after the division by 2^T.
689 //
690 // In comparison to just directly using the first formula, this technique
691 // is much more efficient; using the first formula requires W * K bits,
692 // but this formula needs less than W + K bits. Also, the first formula requires
693 // a division step, whereas this formula only requires multiplies and shifts.
694 //
695 // It doesn't matter whether the subtraction step is done in the calculation
696 // width or the input iteration count's width; if the subtraction overflows,
697 // the result must be zero anyway. We prefer here to do it in the width of
698 // the induction variable because it helps a lot for certain cases; CodeGen
699 // isn't smart enough to ignore the overflow, which leads to much less
700 // efficient code if the width of the subtraction is wider than the native
701 // register width.
702 //
703 // (It's possible to not widen at all by pulling out factors of 2 before
704 // the multiplication; for example, K=2 can be calculated as
705 // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
706 // extra arithmetic, so it's not an obvious win, and it gets
707 // much more complicated for K > 3.)
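//
// A small worked example: for K = 3, K! = 6 = 2^1 * 3, so T = 1 and the odd
// part of the factorial is 3. BC(It, 3) is then computed as It*(It-1)*(It-2)
// at width W+1, divided by 2^1 (a right shift by T), truncated back to W
// bits, and finally multiplied by the multiplicative inverse of 3 modulo 2^W.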
708
709 // Protection from insane SCEVs; this bound is conservative,
710 // but it probably doesn't matter.
711 if (K > 1000)
712 return SE.getCouldNotCompute();
713
714 unsigned W = SE.getTypeSizeInBits(ResultTy);
715
716 // Calculate K! / 2^T and T; we divide out the factors of two before
717 // multiplying for calculating K! / 2^T to avoid overflow.
718 // Other overflow doesn't matter because we only care about the bottom
719 // W bits of the result.
720 APInt OddFactorial(W, 1);
721 unsigned T = 1;
722 for (unsigned i = 3; i <= K; ++i) {
723 APInt Mult(W, i);
724 unsigned TwoFactors = Mult.countTrailingZeros();
725 T += TwoFactors;
726 Mult = Mult.lshr(TwoFactors);
727 OddFactorial *= Mult;
728 }
729
730 // We need at least W + T bits for the multiplication step
731 unsigned CalculationBits = W + T;
732
733 // Calculate 2^T, at width T+W.
734 APInt DivFactor = APInt(CalculationBits, 1).shl(T);
735
736 // Calculate the multiplicative inverse of K! / 2^T;
737 // this multiplication factor will perform the exact division by
738 // K! / 2^T.
739 APInt Mod = APInt::getSignedMinValue(W+1);
740 APInt MultiplyFactor = OddFactorial.zext(W+1);
741 MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
742 MultiplyFactor = MultiplyFactor.trunc(W);
743
744 // Calculate the product, at width T+W
745 IntegerType *CalculationTy = IntegerType::get(SE.getContext(),
746 CalculationBits);
747 const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
748 for (unsigned i = 1; i != K; ++i) {
749 const SCEV *S = SE.getMinusSCEV(It, SE.getConstant(It->getType(), i));
750 Dividend = SE.getMulExpr(Dividend,
751 SE.getTruncateOrZeroExtend(S, CalculationTy));
752 }
753
754 // Divide by 2^T
755 const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));
756
757 // Truncate the result, and divide by K! / 2^T.
758
759 return SE.getMulExpr(SE.getConstant(MultiplyFactor),
760 SE.getTruncateOrZeroExtend(DivResult, ResultTy));
761 }
762
763 /// evaluateAtIteration - Return the value of this chain of recurrences at
764 /// the specified iteration number. We can evaluate this recurrence by
765 /// multiplying each element in the chain by the binomial coefficient
766 /// corresponding to it. In other words, we can evaluate {A,+,B,+,C,+,D} as:
767 ///
768 /// A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
769 ///
770 /// where BC(It, k) stands for binomial coefficient.
771 ///
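/// For instance, the affine recurrence {A,+,B} evaluates to A + B*BC(It, 1)
/// = A + B*It, and {A,+,B,+,C} evaluates to A + B*It + C*(It*(It-1)/2).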
772 const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
773 ScalarEvolution &SE) const {
774 const SCEV *Result = getStart();
775 for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
776 // The computation is correct in the face of overflow provided that the
777 // multiplication is performed _after_ the evaluation of the binomial
778 // coefficient.
779 const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
780 if (isa<SCEVCouldNotCompute>(Coeff))
781 return Coeff;
782
783 Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
784 }
785 return Result;
786 }
787
788 //===----------------------------------------------------------------------===//
789 // SCEV Expression folder implementations
790 //===----------------------------------------------------------------------===//
791
792 const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
793 Type *Ty) {
794 assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
795 "This is not a truncating conversion!");
796 assert(isSCEVable(Ty) &&
797 "This is not a conversion to a SCEVable type!");
798 Ty = getEffectiveSCEVType(Ty);
799
800 FoldingSetNodeID ID;
801 ID.AddInteger(scTruncate);
802 ID.AddPointer(Op);
803 ID.AddPointer(Ty);
804 void *IP = 0;
805 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
806
807 // Fold if the operand is constant.
808 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
809 return getConstant(
810 cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(),
811 getEffectiveSCEVType(Ty))));
812
813 // trunc(trunc(x)) --> trunc(x)
814 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
815 return getTruncateExpr(ST->getOperand(), Ty);
816
817 // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
818 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
819 return getTruncateOrSignExtend(SS->getOperand(), Ty);
820
821 // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
822 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
823 return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
824
825 // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
826 // eliminate all the truncates.
827 if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
828 SmallVector<const SCEV *, 4> Operands;
829 bool hasTrunc = false;
830 for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
831 const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
832 hasTrunc = isa<SCEVTruncateExpr>(S);
833 Operands.push_back(S);
834 }
835 if (!hasTrunc)
836 return getAddExpr(Operands);
837 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
838 }
839
840 // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
841 // eliminate all the truncates.
842 if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
843 SmallVector<const SCEV *, 4> Operands;
844 bool hasTrunc = false;
845 for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
846 const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
847 hasTrunc = isa<SCEVTruncateExpr>(S);
848 Operands.push_back(S);
849 }
850 if (!hasTrunc)
851 return getMulExpr(Operands);
852 UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
853 }
854
855 // If the input value is a chrec scev, truncate the chrec's operands.
856 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
857 SmallVector<const SCEV *, 4> Operands;
858 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
859 Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
860 return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
861 }
862
863 // As a special case, fold trunc(undef) to undef. We don't want to
864 // know too much about SCEVUnknowns, but this special case is handy
865 // and harmless.
866 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
867 if (isa<UndefValue>(U->getValue()))
868 return getSCEV(UndefValue::get(Ty));
869
870 // The cast wasn't folded; create an explicit cast node. We can reuse
871 // the existing insert position since if we get here, we won't have
872 // made any changes which would invalidate it.
873 SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
874 Op, Ty);
875 UniqueSCEVs.InsertNode(S, IP);
876 return S;
877 }
878
879 const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
880 Type *Ty) {
881 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
882 "This is not an extending conversion!");
883 assert(isSCEVable(Ty) &&
884 "This is not a conversion to a SCEVable type!");
885 Ty = getEffectiveSCEVType(Ty);
886
887 // Fold if the operand is constant.
888 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
889 return getConstant(
890 cast<ConstantInt>(ConstantExpr::getZExt(SC->getValue(),
891 getEffectiveSCEVType(Ty))));
892
893 // zext(zext(x)) --> zext(x)
894 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
895 return getZeroExtendExpr(SZ->getOperand(), Ty);
896
897 // Before doing any expensive analysis, check to see if we've already
898 // computed a SCEV for this Op and Ty.
899 FoldingSetNodeID ID;
900 ID.AddInteger(scZeroExtend);
901 ID.AddPointer(Op);
902 ID.AddPointer(Ty);
903 void *IP = 0;
904 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
905
906 // zext(trunc(x)) --> zext(x) or x or trunc(x)
907 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
908 // It's possible the bits taken off by the truncate were all zero bits. If
909 // so, we should be able to simplify this further.
910 const SCEV *X = ST->getOperand();
911 ConstantRange CR = getUnsignedRange(X);
912 unsigned TruncBits = getTypeSizeInBits(ST->getType());
913 unsigned NewBits = getTypeSizeInBits(Ty);
914 if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
915 CR.zextOrTrunc(NewBits)))
916 return getTruncateOrZeroExtend(X, Ty);
917 }
918
919 // If the input value is a chrec scev, and we can prove that the value
920 // did not overflow the old, smaller, value, we can zero extend all of the
921 // operands (often constants). This allows analysis of something like
922 // this: for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
923 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
924 if (AR->isAffine()) {
925 const SCEV *Start = AR->getStart();
926 const SCEV *Step = AR->getStepRecurrence(*this);
927 unsigned BitWidth = getTypeSizeInBits(AR->getType());
928 const Loop *L = AR->getLoop();
929
930 // If we have special knowledge that this addrec won't overflow,
931 // we don't need to do any further analysis.
932 if (AR->getNoWrapFlags(SCEV::FlagNUW))
933 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
934 getZeroExtendExpr(Step, Ty),
935 L, AR->getNoWrapFlags());
936
937 // Check whether the backedge-taken count is SCEVCouldNotCompute.
938 // Note that this serves two purposes: It filters out loops that are
939 // simply not analyzable, and it covers the case where this code is
940 // being called from within backedge-taken count analysis, such that
941 // attempting to ask for the backedge-taken count would likely result
942 // in infinite recursion. In the latter case, the analysis code will
943 // cope with a conservative value, and it will take care to purge
944 // that value once it has finished.
945 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
946 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
947 // Manually compute the final value for AR, checking for
948 // overflow.
949
950 // Check whether the backedge-taken count can be losslessly cast to
951 // the addrec's type. The count is always unsigned.
952 const SCEV *CastedMaxBECount =
953 getTruncateOrZeroExtend(MaxBECount, Start->getType());
954 const SCEV *RecastedMaxBECount =
955 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
956 if (MaxBECount == RecastedMaxBECount) {
957 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
958 // Check whether Start+Step*MaxBECount has no unsigned overflow.
959 const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
960 const SCEV *Add = getAddExpr(Start, ZMul);
961 const SCEV *OperandExtendedAdd =
962 getAddExpr(getZeroExtendExpr(Start, WideTy),
963 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
964 getZeroExtendExpr(Step, WideTy)));
965 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
966 // Cache knowledge of AR NUW, which is propagated to this AddRec.
967 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
968 // Return the expression with the addrec on the outside.
969 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
970 getZeroExtendExpr(Step, Ty),
971 L, AR->getNoWrapFlags());
972 }
973 // Similar to above, only this time treat the step value as signed.
974 // This covers loops that count down.
975 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
976 Add = getAddExpr(Start, SMul);
977 OperandExtendedAdd =
978 getAddExpr(getZeroExtendExpr(Start, WideTy),
979 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
980 getSignExtendExpr(Step, WideTy)));
981 if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd) {
982 // Cache knowledge of AR NW, which is propagated to this AddRec.
983 // Negative step causes unsigned wrap, but it still can't self-wrap.
984 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
985 // Return the expression with the addrec on the outside.
986 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
987 getSignExtendExpr(Step, Ty),
988 L, AR->getNoWrapFlags());
989 }
990 }
991
992 // If the backedge is guarded by a comparison with the pre-inc value
993 // the addrec is safe. Also, if the entry is guarded by a comparison
994 // with the start value and the backedge is guarded by a comparison
995 // with the post-inc value, the addrec is safe.
996 if (isKnownPositive(Step)) {
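        // APInt subtraction wraps modulo 2^BitWidth, so N below is really
        // UINT_MAX - UnsignedMax(Step) + 1; the ULT comparisons against N
        // therefore show that the recurrence always stays at least Step away
        // from unsigned overflow.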
997 const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
998 getUnsignedRange(Step).getUnsignedMax());
999 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
1000 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
1001 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
1002 AR->getPostIncExpr(*this), N))) {
1003 // Cache knowledge of AR NUW, which is propagated to this AddRec.
1004 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNUW);
1005 // Return the expression with the addrec on the outside.
1006 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1007 getZeroExtendExpr(Step, Ty),
1008 L, AR->getNoWrapFlags());
1009 }
1010 } else if (isKnownNegative(Step)) {
1011 const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
1012 getSignedRange(Step).getSignedMin());
1013 if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
1014 (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
1015 isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
1016 AR->getPostIncExpr(*this), N))) {
1017 // Cache knowledge of AR NW, which is propagated to this AddRec.
1018 // Negative step causes unsigned wrap, but it still can't self-wrap.
1019 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNW);
1020 // Return the expression with the addrec on the outside.
1021 return getAddRecExpr(getZeroExtendExpr(Start, Ty),
1022 getSignExtendExpr(Step, Ty),
1023 L, AR->getNoWrapFlags());
1024 }
1025 }
1026 }
1027 }
1028
1029 // The cast wasn't folded; create an explicit cast node.
1030 // Recompute the insert position, as it may have been invalidated.
1031 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1032 SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
1033 Op, Ty);
1034 UniqueSCEVs.InsertNode(S, IP);
1035 return S;
1036 }
1037
1038 // Get the limit of a recurrence such that incrementing by Step cannot cause
1039 // signed overflow as long as the value of the recurrence within the loop does
1040 // not exceed this limit before incrementing.
1041 static const SCEV *getOverflowLimitForStep(const SCEV *Step,
1042 ICmpInst::Predicate *Pred,
1043 ScalarEvolution *SE) {
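  // Note that APInt arithmetic wraps modulo 2^BitWidth: for a positive step,
  // SignedMin - SignedMax(Step) below is really SignedMax - SignedMax(Step) + 1,
  // so a value that is SLT this limit can have Step added to it without signed
  // overflow. The negative-step case is symmetric.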
1044 unsigned BitWidth = SE->getTypeSizeInBits(Step->getType());
1045 if (SE->isKnownPositive(Step)) {
1046 *Pred = ICmpInst::ICMP_SLT;
1047 return SE->getConstant(APInt::getSignedMinValue(BitWidth) -
1048 SE->getSignedRange(Step).getSignedMax());
1049 }
1050 if (SE->isKnownNegative(Step)) {
1051 *Pred = ICmpInst::ICMP_SGT;
1052 return SE->getConstant(APInt::getSignedMaxValue(BitWidth) -
1053 SE->getSignedRange(Step).getSignedMin());
1054 }
1055 return 0;
1056 }
1057
1058 // The recurrence AR has been shown to have no signed wrap. Typically, if we can
1059 // prove NSW for AR, then we can just as easily prove NSW for its preincrement
1060 // or postincrement sibling. This allows normalizing a sign extended AddRec as
1061 // such: {sext(Step + Start),+,Step} => {(Step + sext(Start)),+,Step}. As a
1062 // result, the expression "Step + sext(PreIncAR)" is congruent with
1063 // "sext(PostIncAR)".
1064 static const SCEV *getPreStartForSignExtend(const SCEVAddRecExpr *AR,
1065 Type *Ty,
1066 ScalarEvolution *SE) {
1067 const Loop *L = AR->getLoop();
1068 const SCEV *Start = AR->getStart();
1069 const SCEV *Step = AR->getStepRecurrence(*SE);
1070
1071 // Check for a simple looking step prior to loop entry.
1072 const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Start);
1073 if (!SA)
1074 return 0;
1075
1076 // Create an AddExpr for "PreStart" after subtracting Step. Full SCEV
1077 // subtraction is expensive. For this purpose, perform a quick and dirty
1078 // difference, by checking for Step in the operand list.
1079 SmallVector<const SCEV *, 4> DiffOps;
1080 for (SCEVAddExpr::op_iterator I = SA->op_begin(), E = SA->op_end();
1081 I != E; ++I) {
1082 if (*I != Step)
1083 DiffOps.push_back(*I);
1084 }
1085 if (DiffOps.size() == SA->getNumOperands())
1086 return 0;
1087
1088 // This is a postinc AR. Check for overflow on the preinc recurrence using the
1089 // same three conditions that getSignExtendExpr checks.
1090
1091 // 1. NSW flags on the step increment.
1092 const SCEV *PreStart = SE->getAddExpr(DiffOps, SA->getNoWrapFlags());
1093 const SCEVAddRecExpr *PreAR = dyn_cast<SCEVAddRecExpr>(
1094 SE->getAddRecExpr(PreStart, Step, L, SCEV::FlagAnyWrap));
1095
1096 if (PreAR && PreAR->getNoWrapFlags(SCEV::FlagNSW))
1097 return PreStart;
1098
1099 // 2. Direct overflow check on the step operation's expression.
1100 unsigned BitWidth = SE->getTypeSizeInBits(AR->getType());
1101 Type *WideTy = IntegerType::get(SE->getContext(), BitWidth * 2);
1102 const SCEV *OperandExtendedStart =
1103 SE->getAddExpr(SE->getSignExtendExpr(PreStart, WideTy),
1104 SE->getSignExtendExpr(Step, WideTy));
1105 if (SE->getSignExtendExpr(Start, WideTy) == OperandExtendedStart) {
1106 // Cache knowledge of PreAR NSW.
1107 if (PreAR)
1108 const_cast<SCEVAddRecExpr *>(PreAR)->setNoWrapFlags(SCEV::FlagNSW);
1109 // FIXME: this optimization needs a unit test
1110 DEBUG(dbgs() << "SCEV: untested prestart overflow check\n");
1111 return PreStart;
1112 }
1113
1114 // 3. Loop precondition.
1115 ICmpInst::Predicate Pred;
1116 const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, SE);
1117
1118 if (OverflowLimit &&
1119 SE->isLoopEntryGuardedByCond(L, Pred, PreStart, OverflowLimit)) {
1120 return PreStart;
1121 }
1122 return 0;
1123 }
1124
1125 // Get the normalized sign-extended expression for this AddRec's Start.
1126 static const SCEV *getSignExtendAddRecStart(const SCEVAddRecExpr *AR,
1127 Type *Ty,
1128 ScalarEvolution *SE) {
1129 const SCEV *PreStart = getPreStartForSignExtend(AR, Ty, SE);
1130 if (!PreStart)
1131 return SE->getSignExtendExpr(AR->getStart(), Ty);
1132
1133 return SE->getAddExpr(SE->getSignExtendExpr(AR->getStepRecurrence(*SE), Ty),
1134 SE->getSignExtendExpr(PreStart, Ty));
1135 }
1136
1137 const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
1138 Type *Ty) {
1139 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1140 "This is not an extending conversion!");
1141 assert(isSCEVable(Ty) &&
1142 "This is not a conversion to a SCEVable type!");
1143 Ty = getEffectiveSCEVType(Ty);
1144
1145 // Fold if the operand is constant.
1146 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1147 return getConstant(
1148 cast<ConstantInt>(ConstantExpr::getSExt(SC->getValue(),
1149 getEffectiveSCEVType(Ty))));
1150
1151 // sext(sext(x)) --> sext(x)
1152 if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
1153 return getSignExtendExpr(SS->getOperand(), Ty);
1154
1155 // sext(zext(x)) --> zext(x)
1156 if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
1157 return getZeroExtendExpr(SZ->getOperand(), Ty);
1158
1159 // Before doing any expensive analysis, check to see if we've already
1160 // computed a SCEV for this Op and Ty.
1161 FoldingSetNodeID ID;
1162 ID.AddInteger(scSignExtend);
1163 ID.AddPointer(Op);
1164 ID.AddPointer(Ty);
1165 void *IP = 0;
1166 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1167
1168 // If the input value is provably positive, build a zext instead.
1169 if (isKnownNonNegative(Op))
1170 return getZeroExtendExpr(Op, Ty);
1171
1172 // sext(trunc(x)) --> sext(x) or x or trunc(x)
1173 if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
1174 // It's possible the bits taken off by the truncate were all sign bits. If
1175 // so, we should be able to simplify this further.
1176 const SCEV *X = ST->getOperand();
1177 ConstantRange CR = getSignedRange(X);
1178 unsigned TruncBits = getTypeSizeInBits(ST->getType());
1179 unsigned NewBits = getTypeSizeInBits(Ty);
1180 if (CR.truncate(TruncBits).signExtend(NewBits).contains(
1181 CR.sextOrTrunc(NewBits)))
1182 return getTruncateOrSignExtend(X, Ty);
1183 }
1184
1185 // If the input value is a chrec scev, and we can prove that the value
1186 // did not overflow the old, smaller, value, we can sign extend all of the
1187 // operands (often constants). This allows analysis of something like
1188 // this: for (signed char X = 0; X < 100; ++X) { int Y = X; }
1189 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
1190 if (AR->isAffine()) {
1191 const SCEV *Start = AR->getStart();
1192 const SCEV *Step = AR->getStepRecurrence(*this);
1193 unsigned BitWidth = getTypeSizeInBits(AR->getType());
1194 const Loop *L = AR->getLoop();
1195
1196 // If we have special knowledge that this addrec won't overflow,
1197 // we don't need to do any further analysis.
1198 if (AR->getNoWrapFlags(SCEV::FlagNSW))
1199 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1200 getSignExtendExpr(Step, Ty),
1201 L, SCEV::FlagNSW);
1202
1203 // Check whether the backedge-taken count is SCEVCouldNotCompute.
1204 // Note that this serves two purposes: It filters out loops that are
1205 // simply not analyzable, and it covers the case where this code is
1206 // being called from within backedge-taken count analysis, such that
1207 // attempting to ask for the backedge-taken count would likely result
1208 // in infinite recursion. In the latter case, the analysis code will
1209 // cope with a conservative value, and it will take care to purge
1210 // that value once it has finished.
1211 const SCEV *MaxBECount = getMaxBackedgeTakenCount(L);
1212 if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
1213 // Manually compute the final value for AR, checking for
1214 // overflow.
1215
1216 // Check whether the backedge-taken count can be losslessly cast to
1217 // the addrec's type. The count is always unsigned.
1218 const SCEV *CastedMaxBECount =
1219 getTruncateOrZeroExtend(MaxBECount, Start->getType());
1220 const SCEV *RecastedMaxBECount =
1221 getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
1222 if (MaxBECount == RecastedMaxBECount) {
1223 Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
1224 // Check whether Start+Step*MaxBECount has no signed overflow.
1225 const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
1226 const SCEV *Add = getAddExpr(Start, SMul);
1227 const SCEV *OperandExtendedAdd =
1228 getAddExpr(getSignExtendExpr(Start, WideTy),
1229 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1230 getSignExtendExpr(Step, WideTy)));
1231 if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
1232 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1233 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1234 // Return the expression with the addrec on the outside.
1235 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1236 getSignExtendExpr(Step, Ty),
1237 L, AR->getNoWrapFlags());
1238 }
1239 // Similar to above, only this time treat the step value as unsigned.
1240 // This covers loops that count up with an unsigned step.
1241 const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
1242 Add = getAddExpr(Start, UMul);
1243 OperandExtendedAdd =
1244 getAddExpr(getSignExtendExpr(Start, WideTy),
1245 getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
1246 getZeroExtendExpr(Step, WideTy)));
1247 if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd) {
1248 // Cache knowledge of AR NSW, which is propagated to this AddRec.
1249 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1250 // Return the expression with the addrec on the outside.
1251 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1252 getZeroExtendExpr(Step, Ty),
1253 L, AR->getNoWrapFlags());
1254 }
1255 }
1256
1257 // If the backedge is guarded by a comparison with the pre-inc value
1258 // the addrec is safe. Also, if the entry is guarded by a comparison
1259 // with the start value and the backedge is guarded by a comparison
1260 // with the post-inc value, the addrec is safe.
1261 ICmpInst::Predicate Pred;
1262 const SCEV *OverflowLimit = getOverflowLimitForStep(Step, &Pred, this);
1263 if (OverflowLimit &&
1264 (isLoopBackedgeGuardedByCond(L, Pred, AR, OverflowLimit) ||
1265 (isLoopEntryGuardedByCond(L, Pred, Start, OverflowLimit) &&
1266 isLoopBackedgeGuardedByCond(L, Pred, AR->getPostIncExpr(*this),
1267 OverflowLimit)))) {
1268 // Cache knowledge of AR NSW, then propagate NSW to the wide AddRec.
1269 const_cast<SCEVAddRecExpr *>(AR)->setNoWrapFlags(SCEV::FlagNSW);
1270 return getAddRecExpr(getSignExtendAddRecStart(AR, Ty, this),
1271 getSignExtendExpr(Step, Ty),
1272 L, AR->getNoWrapFlags());
1273 }
1274 }
1275 }
1276
1277 // The cast wasn't folded; create an explicit cast node.
1278 // Recompute the insert position, as it may have been invalidated.
1279 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1280 SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
1281 Op, Ty);
1282 UniqueSCEVs.InsertNode(S, IP);
1283 return S;
1284 }
1285
1286 /// getAnyExtendExpr - Return a SCEV for the given operand extended with
1287 /// unspecified bits out to the given type.
1288 ///
1289 const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
1290 Type *Ty) {
1291 assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
1292 "This is not an extending conversion!");
1293 assert(isSCEVable(Ty) &&
1294 "This is not a conversion to a SCEVable type!");
1295 Ty = getEffectiveSCEVType(Ty);
1296
1297 // Sign-extend negative constants.
1298 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
1299 if (SC->getValue()->getValue().isNegative())
1300 return getSignExtendExpr(Op, Ty);
1301
1302 // Peel off a truncate cast.
1303 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
1304 const SCEV *NewOp = T->getOperand();
1305 if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
1306 return getAnyExtendExpr(NewOp, Ty);
1307 return getTruncateOrNoop(NewOp, Ty);
1308 }
1309
1310 // Next try a zext cast. If the cast is folded, use it.
1311 const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
1312 if (!isa<SCEVZeroExtendExpr>(ZExt))
1313 return ZExt;
1314
1315 // Next try a sext cast. If the cast is folded, use it.
1316 const SCEV *SExt = getSignExtendExpr(Op, Ty);
1317 if (!isa<SCEVSignExtendExpr>(SExt))
1318 return SExt;
1319
1320 // Force the cast to be folded into the operands of an addrec.
1321 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
1322 SmallVector<const SCEV *, 4> Ops;
1323 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
1324 I != E; ++I)
1325 Ops.push_back(getAnyExtendExpr(*I, Ty));
1326 return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagNW);
1327 }
1328
1329 // As a special case, fold anyext(undef) to undef. We don't want to
1330 // know too much about SCEVUnknowns, but this special case is handy
1331 // and harmless.
1332 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(Op))
1333 if (isa<UndefValue>(U->getValue()))
1334 return getSCEV(UndefValue::get(Ty));
1335
1336 // If the expression is obviously signed, use the sext cast value.
1337 if (isa<SCEVSMaxExpr>(Op))
1338 return SExt;
1339
1340 // Absent any other information, use the zext cast value.
1341 return ZExt;
1342 }
1343
1344 /// CollectAddOperandsWithScales - Process the given Ops list, which is
1345 /// a list of operands to be added under the given scale, update the given
1346 /// map. This is a helper function for getAddRecExpr. As an example of
1347 /// what it does, given a sequence of operands that would form an add
1348 /// expression like this:
1349 ///
1350 /// m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
1351 ///
1352 /// where A and B are constants, update the map with these values:
1353 ///
1354 /// (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
1355 ///
1356 /// and add 13 + A*B*29 to AccumulatedConstant.
1357 /// This will allow getAddRecExpr to produce this:
1358 ///
1359 /// 13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
1360 ///
1361 /// This form often exposes folding opportunities that are hidden in
1362 /// the original operand list.
1363 ///
1364 /// Return true iff it appears that any interesting folding opportunities
1365 /// may be exposed. This helps getAddRecExpr short-circuit extra work in
1366 /// the common case where no interesting opportunities are present, and
1367 /// is also used as a check to avoid infinite recursion.
1368 ///
1369 static bool
1370 CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
1371 SmallVector<const SCEV *, 8> &NewOps,
1372 APInt &AccumulatedConstant,
1373 const SCEV *const *Ops, size_t NumOperands,
1374 const APInt &Scale,
1375 ScalarEvolution &SE) {
1376 bool Interesting = false;
1377
1378 // Iterate over the add operands. They are sorted, with constants first.
1379 unsigned i = 0;
1380 while (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1381 ++i;
1382 // Pull a buried constant out to the outside.
1383 if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
1384 Interesting = true;
1385 AccumulatedConstant += Scale * C->getValue()->getValue();
1386 }
1387
1388 // Next comes everything else. We're especially interested in multiplies
1389 // here, but they're in the middle, so just visit the rest with one loop.
1390 for (; i != NumOperands; ++i) {
1391 const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
1392 if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
1393 APInt NewScale =
1394 Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
1395 if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
1396 // A multiplication of a constant with another add; recurse.
1397 const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
1398 Interesting |=
1399 CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1400 Add->op_begin(), Add->getNumOperands(),
1401 NewScale, SE);
1402 } else {
1403 // A multiplication of a constant with some other value. Update
1404 // the map.
1405 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
1406 const SCEV *Key = SE.getMulExpr(MulOps);
1407 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1408 M.insert(std::make_pair(Key, NewScale));
1409 if (Pair.second) {
1410 NewOps.push_back(Pair.first->first);
1411 } else {
1412 Pair.first->second += NewScale;
1413 // The map already had an entry for this value, which may indicate
1414 // a folding opportunity.
1415 Interesting = true;
1416 }
1417 }
1418 } else {
1419 // An ordinary operand. Update the map.
1420 std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
1421 M.insert(std::make_pair(Ops[i], Scale));
1422 if (Pair.second) {
1423 NewOps.push_back(Pair.first->first);
1424 } else {
1425 Pair.first->second += Scale;
1426 // The map already had an entry for this value, which may indicate
1427 // a folding opportunity.
1428 Interesting = true;
1429 }
1430 }
1431 }
1432
1433 return Interesting;
1434 }
1435
1436 namespace {
1437 struct APIntCompare {
1438     bool operator()(const APInt &LHS, const APInt &RHS) const {
1439 return LHS.ult(RHS);
1440 }
1441 };
1442 }
1443
1444 /// getAddExpr - Get a canonical add expression, or something simpler if
1445 /// possible.
1446 const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
1447 SCEV::NoWrapFlags Flags) {
1448 assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
1449 "only nuw or nsw allowed");
1450 assert(!Ops.empty() && "Cannot get empty add!");
1451 if (Ops.size() == 1) return Ops[0];
1452 #ifndef NDEBUG
1453 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1454 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1455 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1456 "SCEVAddExpr operand types don't match!");
1457 #endif
1458
1459 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1460 // And vice-versa.
1461 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1462 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
1463 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
1464 bool All = true;
1465 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1466 E = Ops.end(); I != E; ++I)
1467 if (!isKnownNonNegative(*I)) {
1468 All = false;
1469 break;
1470 }
1471 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
1472 }
1473
1474   // Sort by complexity; this groups all similar expression types together.
1475 GroupByComplexity(Ops, LI);
1476
1477 // If there are any constants, fold them together.
1478 unsigned Idx = 0;
1479 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1480 ++Idx;
1481 assert(Idx < Ops.size());
1482 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1483 // We found two constants, fold them together!
1484 Ops[0] = getConstant(LHSC->getValue()->getValue() +
1485 RHSC->getValue()->getValue());
1486 if (Ops.size() == 2) return Ops[0];
1487 Ops.erase(Ops.begin()+1); // Erase the folded element
1488 LHSC = cast<SCEVConstant>(Ops[0]);
1489 }
1490
1491 // If we are left with a constant zero being added, strip it off.
1492 if (LHSC->getValue()->isZero()) {
1493 Ops.erase(Ops.begin());
1494 --Idx;
1495 }
1496
1497 if (Ops.size() == 1) return Ops[0];
1498 }
1499
1500 // Okay, check to see if the same value occurs in the operand list more than
1501   // once. If so, merge them together into a multiply expression. Since we
1502 // sorted the list, these values are required to be adjacent.
1503 Type *Ty = Ops[0]->getType();
1504 bool FoundMatch = false;
1505 for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
1506 if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
1507 // Scan ahead to count how many equal operands there are.
1508 unsigned Count = 2;
1509 while (i+Count != e && Ops[i+Count] == Ops[i])
1510 ++Count;
1511 // Merge the values into a multiply.
1512 const SCEV *Scale = getConstant(Ty, Count);
1513 const SCEV *Mul = getMulExpr(Scale, Ops[i]);
1514 if (Ops.size() == Count)
1515 return Mul;
1516 Ops[i] = Mul;
1517 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
1518 --i; e -= Count - 1;
1519 FoundMatch = true;
1520 }
1521 if (FoundMatch)
1522 return getAddExpr(Ops, Flags);
1523
1524 // Check for truncates. If all the operands are truncated from the same
1525 // type, see if factoring out the truncate would permit the result to be
1526   // folded. e.g., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
1527 // if the contents of the resulting outer trunc fold to something simple.
1528 for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
1529 const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
1530 Type *DstType = Trunc->getType();
1531 Type *SrcType = Trunc->getOperand()->getType();
1532 SmallVector<const SCEV *, 8> LargeOps;
1533 bool Ok = true;
1534 // Check all the operands to see if they can be represented in the
1535 // source type of the truncate.
1536 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1537 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
1538 if (T->getOperand()->getType() != SrcType) {
1539 Ok = false;
1540 break;
1541 }
1542 LargeOps.push_back(T->getOperand());
1543 } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
1544 LargeOps.push_back(getAnyExtendExpr(C, SrcType));
1545 } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
1546 SmallVector<const SCEV *, 8> LargeMulOps;
1547 for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
1548 if (const SCEVTruncateExpr *T =
1549 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
1550 if (T->getOperand()->getType() != SrcType) {
1551 Ok = false;
1552 break;
1553 }
1554 LargeMulOps.push_back(T->getOperand());
1555 } else if (const SCEVConstant *C =
1556 dyn_cast<SCEVConstant>(M->getOperand(j))) {
1557 LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
1558 } else {
1559 Ok = false;
1560 break;
1561 }
1562 }
1563 if (Ok)
1564 LargeOps.push_back(getMulExpr(LargeMulOps));
1565 } else {
1566 Ok = false;
1567 break;
1568 }
1569 }
1570 if (Ok) {
1571 // Evaluate the expression in the larger type.
1572 const SCEV *Fold = getAddExpr(LargeOps, Flags);
1573 // If it folds to something simple, use it. Otherwise, don't.
1574 if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
1575 return getTruncateExpr(Fold, DstType);
1576 }
1577 }
1578
1579 // Skip past any other cast SCEVs.
1580 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
1581 ++Idx;
1582
1583   // If there are add operands, they would be next.
1584 if (Idx < Ops.size()) {
1585 bool DeletedAdd = false;
1586 while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
1587 // If we have an add, expand the add operands onto the end of the operands
1588 // list.
1589 Ops.erase(Ops.begin()+Idx);
1590 Ops.append(Add->op_begin(), Add->op_end());
1591 DeletedAdd = true;
1592 }
1593
1594 // If we deleted at least one add, we added operands to the end of the list,
1595 // and they are not necessarily sorted. Recurse to resort and resimplify
1596 // any operands we just acquired.
1597 if (DeletedAdd)
1598 return getAddExpr(Ops);
1599 }
1600
1601 // Skip over the add expression until we get to a multiply.
1602 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1603 ++Idx;
1604
1605 // Check to see if there are any folding opportunities present with
1606 // operands multiplied by constant values.
1607 if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
1608 uint64_t BitWidth = getTypeSizeInBits(Ty);
1609 DenseMap<const SCEV *, APInt> M;
1610 SmallVector<const SCEV *, 8> NewOps;
1611 APInt AccumulatedConstant(BitWidth, 0);
1612 if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
1613 Ops.data(), Ops.size(),
1614 APInt(BitWidth, 1), *this)) {
1615       // Some interesting folding opportunity is present, so it's worthwhile to
1616 // re-generate the operands list. Group the operands by constant scale,
1617 // to avoid multiplying by the same constant scale multiple times.
1618 std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
1619 for (SmallVector<const SCEV *, 8>::const_iterator I = NewOps.begin(),
1620 E = NewOps.end(); I != E; ++I)
1621 MulOpLists[M.find(*I)->second].push_back(*I);
1622 // Re-generate the operands list.
1623 Ops.clear();
1624 if (AccumulatedConstant != 0)
1625 Ops.push_back(getConstant(AccumulatedConstant));
1626 for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
1627 I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
1628 if (I->first != 0)
1629 Ops.push_back(getMulExpr(getConstant(I->first),
1630 getAddExpr(I->second)));
1631 if (Ops.empty())
1632 return getConstant(Ty, 0);
1633 if (Ops.size() == 1)
1634 return Ops[0];
1635 return getAddExpr(Ops);
1636 }
1637 }
1638
1639 // If we are adding something to a multiply expression, make sure the
1640 // something is not already an operand of the multiply. If so, merge it into
1641 // the multiply.
1642 for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
1643 const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
1644 for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
1645 const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
1646 if (isa<SCEVConstant>(MulOpSCEV))
1647 continue;
1648 for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
1649 if (MulOpSCEV == Ops[AddOp]) {
1650 // Fold W + X + (X * Y * Z) --> W + (X * ((Y*Z)+1))
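               // Note that (MulOp == 0) evaluates to 1 when MulOp is 0 and to 0
               // otherwise, so it selects the other operand of a two-operand multiply.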
1651 const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
1652 if (Mul->getNumOperands() != 2) {
1653 // If the multiply has more than two operands, we must get the
1654 // Y*Z term.
1655 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1656 Mul->op_begin()+MulOp);
1657 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1658 InnerMul = getMulExpr(MulOps);
1659 }
1660 const SCEV *One = getConstant(Ty, 1);
1661 const SCEV *AddOne = getAddExpr(One, InnerMul);
1662 const SCEV *OuterMul = getMulExpr(AddOne, MulOpSCEV);
1663 if (Ops.size() == 2) return OuterMul;
1664 if (AddOp < Idx) {
1665 Ops.erase(Ops.begin()+AddOp);
1666 Ops.erase(Ops.begin()+Idx-1);
1667 } else {
1668 Ops.erase(Ops.begin()+Idx);
1669 Ops.erase(Ops.begin()+AddOp-1);
1670 }
1671 Ops.push_back(OuterMul);
1672 return getAddExpr(Ops);
1673 }
1674
1675 // Check this multiply against other multiplies being added together.
1676 for (unsigned OtherMulIdx = Idx+1;
1677 OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
1678 ++OtherMulIdx) {
1679 const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
1680 // If MulOp occurs in OtherMul, we can fold the two multiplies
1681 // together.
1682 for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
1683 OMulOp != e; ++OMulOp)
1684 if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
1685 // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
1686 const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
1687 if (Mul->getNumOperands() != 2) {
1688 SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
1689 Mul->op_begin()+MulOp);
1690 MulOps.append(Mul->op_begin()+MulOp+1, Mul->op_end());
1691 InnerMul1 = getMulExpr(MulOps);
1692 }
1693 const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
1694 if (OtherMul->getNumOperands() != 2) {
1695 SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
1696 OtherMul->op_begin()+OMulOp);
1697 MulOps.append(OtherMul->op_begin()+OMulOp+1, OtherMul->op_end());
1698 InnerMul2 = getMulExpr(MulOps);
1699 }
1700 const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
1701 const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
1702 if (Ops.size() == 2) return OuterMul;
1703 Ops.erase(Ops.begin()+Idx);
1704 Ops.erase(Ops.begin()+OtherMulIdx-1);
1705 Ops.push_back(OuterMul);
1706 return getAddExpr(Ops);
1707 }
1708 }
1709 }
1710 }
1711
1712 // If there are any add recurrences in the operands list, see if any other
1713 // added values are loop invariant. If so, we can fold them into the
1714 // recurrence.
1715 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1716 ++Idx;
1717
1718 // Scan over all recurrences, trying to fold loop invariants into them.
1719 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1720 // Scan all of the other operands to this add and add them to the vector if
1721 // they are loop invariant w.r.t. the recurrence.
1722 SmallVector<const SCEV *, 8> LIOps;
1723 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1724 const Loop *AddRecLoop = AddRec->getLoop();
1725 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1726 if (isLoopInvariant(Ops[i], AddRecLoop)) {
1727 LIOps.push_back(Ops[i]);
1728 Ops.erase(Ops.begin()+i);
1729 --i; --e;
1730 }
1731
1732 // If we found some loop invariants, fold them into the recurrence.
1733 if (!LIOps.empty()) {
1734 // NLI + LI + {Start,+,Step} --> NLI + {LI+Start,+,Step}
1735 LIOps.push_back(AddRec->getStart());
1736
1737 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1738 AddRec->op_end());
1739 AddRecOps[0] = getAddExpr(LIOps);
1740
1741 // Build the new addrec. Propagate the NUW and NSW flags if both the
1742 // outer add and the inner addrec are guaranteed to have no overflow.
1743 // Always propagate NW.
1744 Flags = AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW));
1745 const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
1746
1747 // If all of the other operands were loop invariant, we are done.
1748 if (Ops.size() == 1) return NewRec;
1749
1750 // Otherwise, add the folded AddRec by the non-invariant parts.
1751 for (unsigned i = 0;; ++i)
1752 if (Ops[i] == AddRec) {
1753 Ops[i] = NewRec;
1754 break;
1755 }
1756 return getAddExpr(Ops);
1757 }
1758
1759 // Okay, if there weren't any loop invariants to be folded, check to see if
1760 // there are multiple AddRec's with the same loop induction variable being
1761 // added together. If so, we can fold them.
1762 for (unsigned OtherIdx = Idx+1;
1763 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1764 ++OtherIdx)
1765 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
1766 // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
1767 SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
1768 AddRec->op_end());
1769 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
1770 ++OtherIdx)
1771 if (const SCEVAddRecExpr *OtherAddRec =
1772 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
1773 if (OtherAddRec->getLoop() == AddRecLoop) {
1774 for (unsigned i = 0, e = OtherAddRec->getNumOperands();
1775 i != e; ++i) {
1776 if (i >= AddRecOps.size()) {
1777 AddRecOps.append(OtherAddRec->op_begin()+i,
1778 OtherAddRec->op_end());
1779 break;
1780 }
1781 AddRecOps[i] = getAddExpr(AddRecOps[i],
1782 OtherAddRec->getOperand(i));
1783 }
1784 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
1785 }
1786 // Step size has changed, so we cannot guarantee no self-wraparound.
1787 Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
1788 return getAddExpr(Ops);
1789 }
1790
1791 // Otherwise couldn't fold anything into this recurrence. Move onto the
1792 // next one.
1793 }
1794
1795 // Okay, it looks like we really DO need an add expr. Check to see if we
1796 // already have one, otherwise create a new one.
1797 FoldingSetNodeID ID;
1798 ID.AddInteger(scAddExpr);
1799 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1800 ID.AddPointer(Ops[i]);
1801 void *IP = 0;
1802 SCEVAddExpr *S =
1803 static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
1804 if (!S) {
1805 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
1806 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
1807 S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
1808 O, Ops.size());
1809 UniqueSCEVs.InsertNode(S, IP);
1810 }
1811 S->setNoWrapFlags(Flags);
1812 return S;
1813 }
1814
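     /// umul_ov - Multiply two unsigned 64-bit values, setting Overflow if the
     /// product wraps around 2^64. A factor of 0 or 1 can never overflow; otherwise
     /// the multiplication overflowed exactly when dividing the truncated product
     /// by j fails to recover i.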
1815 static uint64_t umul_ov(uint64_t i, uint64_t j, bool &Overflow) {
1816 uint64_t k = i*j;
1817 if (j > 1 && k / j != i) Overflow = true;
1818 return k;
1819 }
1820
1821 /// Compute the result of "n choose k", the binomial coefficient. If an
1822 /// intermediate computation overflows, Overflow will be set and the return will
1823 /// be garbage. Overflow is not cleared on absence of overflow.
1824 static uint64_t Choose(uint64_t n, uint64_t k, bool &Overflow) {
1825 // We use the multiplicative formula:
1826 // n(n-1)(n-2)...(n-(k-1)) / k(k-1)(k-2)...1 .
1827   // At each iteration, we multiply by the next term of the numerator and divide
1828   // by the next term of the denominator. This division will always produce an
1829 // integral result, and helps reduce the chance of overflow in the
1830 // intermediate computations. However, we can still overflow even when the
1831 // final result would fit.
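       // For example, Choose(6, 2) is computed as r = 6/1 = 6, then r = (6*5)/2 = 15,
       // which matches C(6,2) = 15; each division is exact because the partial
       // product at step i equals i * C(n, i).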
1832
1833 if (n == 0 || n == k) return 1;
1834 if (k > n) return 0;
1835
1836 if (k > n/2)
1837 k = n-k;
1838
1839 uint64_t r = 1;
1840 for (uint64_t i = 1; i <= k; ++i) {
1841 r = umul_ov(r, n-(i-1), Overflow);
1842 r /= i;
1843 }
1844 return r;
1845 }
1846
1847 /// getMulExpr - Get a canonical multiply expression, or something simpler if
1848 /// possible.
1849 const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
1850 SCEV::NoWrapFlags Flags) {
1851 assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
1852 "only nuw or nsw allowed");
1853 assert(!Ops.empty() && "Cannot get empty mul!");
1854 if (Ops.size() == 1) return Ops[0];
1855 #ifndef NDEBUG
1856 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
1857 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1858 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
1859 "SCEVMulExpr operand types don't match!");
1860 #endif
1861
1862 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
1863 // And vice-versa.
1864 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
1865 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
1866 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
1867 bool All = true;
1868 for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
1869 E = Ops.end(); I != E; ++I)
1870 if (!isKnownNonNegative(*I)) {
1871 All = false;
1872 break;
1873 }
1874 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
1875 }
1876
1877   // Sort by complexity; this groups all similar expression types together.
1878 GroupByComplexity(Ops, LI);
1879
1880 // If there are any constants, fold them together.
1881 unsigned Idx = 0;
1882 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1883
1884 // C1*(C2+V) -> C1*C2 + C1*V
1885 if (Ops.size() == 2)
1886 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
1887 if (Add->getNumOperands() == 2 &&
1888 isa<SCEVConstant>(Add->getOperand(0)))
1889 return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
1890 getMulExpr(LHSC, Add->getOperand(1)));
1891
1892 ++Idx;
1893 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1894 // We found two constants, fold them together!
1895 ConstantInt *Fold = ConstantInt::get(getContext(),
1896 LHSC->getValue()->getValue() *
1897 RHSC->getValue()->getValue());
1898 Ops[0] = getConstant(Fold);
1899 Ops.erase(Ops.begin()+1); // Erase the folded element
1900 if (Ops.size() == 1) return Ops[0];
1901 LHSC = cast<SCEVConstant>(Ops[0]);
1902 }
1903
1904 // If we are left with a constant one being multiplied, strip it off.
1905 if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
1906 Ops.erase(Ops.begin());
1907 --Idx;
1908 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
1909 // If we have a multiply of zero, it will always be zero.
1910 return Ops[0];
1911 } else if (Ops[0]->isAllOnesValue()) {
1912 // If we have a mul by -1 of an add, try distributing the -1 among the
1913 // add operands.
1914 if (Ops.size() == 2) {
1915 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
1916 SmallVector<const SCEV *, 4> NewOps;
1917 bool AnyFolded = false;
1918 for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
1919 E = Add->op_end(); I != E; ++I) {
1920 const SCEV *Mul = getMulExpr(Ops[0], *I);
1921 if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
1922 NewOps.push_back(Mul);
1923 }
1924 if (AnyFolded)
1925 return getAddExpr(NewOps);
1926 }
1927 else if (const SCEVAddRecExpr *
1928 AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
1929 // Negation preserves a recurrence's no self-wrap property.
1930 SmallVector<const SCEV *, 4> Operands;
1931 for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
1932 E = AddRec->op_end(); I != E; ++I) {
1933 Operands.push_back(getMulExpr(Ops[0], *I));
1934 }
1935 return getAddRecExpr(Operands, AddRec->getLoop(),
1936 AddRec->getNoWrapFlags(SCEV::FlagNW));
1937 }
1938 }
1939 }
1940
1941 if (Ops.size() == 1)
1942 return Ops[0];
1943 }
1944
1945 // Skip over the add expression until we get to a multiply.
1946 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
1947 ++Idx;
1948
1949   // If there are mul operands, inline them all into this expression.
1950 if (Idx < Ops.size()) {
1951 bool DeletedMul = false;
1952 while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
1953       // If we have a mul, expand the mul operands onto the end of the operands
1954 // list.
1955 Ops.erase(Ops.begin()+Idx);
1956 Ops.append(Mul->op_begin(), Mul->op_end());
1957 DeletedMul = true;
1958 }
1959
1960 // If we deleted at least one mul, we added operands to the end of the list,
1961 // and they are not necessarily sorted. Recurse to resort and resimplify
1962 // any operands we just acquired.
1963 if (DeletedMul)
1964 return getMulExpr(Ops);
1965 }
1966
1967 // If there are any add recurrences in the operands list, see if any other
1968   // multiplied values are loop invariant. If so, we can fold them into the
1969 // recurrence.
1970 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
1971 ++Idx;
1972
1973 // Scan over all recurrences, trying to fold loop invariants into them.
1974 for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
1975 // Scan all of the other operands to this mul and add them to the vector if
1976 // they are loop invariant w.r.t. the recurrence.
1977 SmallVector<const SCEV *, 8> LIOps;
1978 const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
1979 const Loop *AddRecLoop = AddRec->getLoop();
1980 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1981 if (isLoopInvariant(Ops[i], AddRecLoop)) {
1982 LIOps.push_back(Ops[i]);
1983 Ops.erase(Ops.begin()+i);
1984 --i; --e;
1985 }
1986
1987 // If we found some loop invariants, fold them into the recurrence.
1988 if (!LIOps.empty()) {
1989 // NLI * LI * {Start,+,Step} --> NLI * {LI*Start,+,LI*Step}
1990 SmallVector<const SCEV *, 4> NewOps;
1991 NewOps.reserve(AddRec->getNumOperands());
1992 const SCEV *Scale = getMulExpr(LIOps);
1993 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
1994 NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
1995
1996 // Build the new addrec. Propagate the NUW and NSW flags if both the
1997 // outer mul and the inner addrec are guaranteed to have no overflow.
1998 //
1999 // No self-wrap cannot be guaranteed after changing the step size, but
2000 // will be inferred if either NUW or NSW is true.
2001 Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
2002 const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
2003
2004 // If all of the other operands were loop invariant, we are done.
2005 if (Ops.size() == 1) return NewRec;
2006
2007 // Otherwise, multiply the folded AddRec by the non-invariant parts.
2008 for (unsigned i = 0;; ++i)
2009 if (Ops[i] == AddRec) {
2010 Ops[i] = NewRec;
2011 break;
2012 }
2013 return getMulExpr(Ops);
2014 }
2015
2016 // Okay, if there weren't any loop invariants to be folded, check to see if
2017 // there are multiple AddRec's with the same loop induction variable being
2018 // multiplied together. If so, we can fold them.
2019 for (unsigned OtherIdx = Idx+1;
2020 OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2021 ++OtherIdx) {
2022 if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
2023 // {A1,+,A2,+,...,+,An}<L> * {B1,+,B2,+,...,+,Bn}<L>
2024 // = {x=1 in [ sum y=x..2x [ sum z=max(y-x, y-n)..min(x,n) [
2025 // choose(x, 2x)*choose(2x-y, x-z)*A_{y-z}*B_z
2026 // ]]],+,...up to x=2n}.
2027 // Note that the arguments to choose() are always integers with values
2028 // known at compile time, never SCEV objects.
2029 //
2030 // The implementation avoids pointless extra computations when the two
2031 // addrec's are of different length (mathematically, it's equivalent to
2032 // an infinite stream of zeros on the right).
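             // In the simplest case this reduces to the familiar linear identity
             // {A,+,B}<L> * {C,+,D}<L> = {A*C,+,A*D + B*C + B*D,+,2*B*D}<L>.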
2033 bool OpsModified = false;
2034 for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
2035 ++OtherIdx)
2036 if (const SCEVAddRecExpr *OtherAddRec =
2037 dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
2038 if (OtherAddRec->getLoop() == AddRecLoop) {
2039 bool Overflow = false;
2040 Type *Ty = AddRec->getType();
2041 bool LargerThan64Bits = getTypeSizeInBits(Ty) > 64;
2042 SmallVector<const SCEV*, 7> AddRecOps;
2043 for (int x = 0, xe = AddRec->getNumOperands() +
2044 OtherAddRec->getNumOperands() - 1;
2045 x != xe && !Overflow; ++x) {
2046 const SCEV *Term = getConstant(Ty, 0);
2047 for (int y = x, ye = 2*x+1; y != ye && !Overflow; ++y) {
2048 uint64_t Coeff1 = Choose(x, 2*x - y, Overflow);
2049 for (int z = std::max(y-x, y-(int)AddRec->getNumOperands()+1),
2050 ze = std::min(x+1, (int)OtherAddRec->getNumOperands());
2051 z < ze && !Overflow; ++z) {
2052 uint64_t Coeff2 = Choose(2*x - y, x-z, Overflow);
2053 uint64_t Coeff;
2054 if (LargerThan64Bits)
2055 Coeff = umul_ov(Coeff1, Coeff2, Overflow);
2056 else
2057 Coeff = Coeff1*Coeff2;
2058 const SCEV *CoeffTerm = getConstant(Ty, Coeff);
2059 const SCEV *Term1 = AddRec->getOperand(y-z);
2060 const SCEV *Term2 = OtherAddRec->getOperand(z);
2061 Term = getAddExpr(Term, getMulExpr(CoeffTerm, Term1,Term2));
2062 }
2063 }
2064 AddRecOps.push_back(Term);
2065 }
2066 if (!Overflow) {
2067 const SCEV *NewAddRec = getAddRecExpr(AddRecOps,
2068 AddRec->getLoop(),
2069 SCEV::FlagAnyWrap);
2070 if (Ops.size() == 2) return NewAddRec;
2071 Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
2072 Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
2073 OpsModified = true;
2074 }
2075 }
2076 if (OpsModified)
2077 return getMulExpr(Ops);
2078 }
2079 }
2080
2081 // Otherwise couldn't fold anything into this recurrence. Move onto the
2082 // next one.
2083 }
2084
2085   // Okay, it looks like we really DO need a mul expr. Check to see if we
2086 // already have one, otherwise create a new one.
2087 FoldingSetNodeID ID;
2088 ID.AddInteger(scMulExpr);
2089 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2090 ID.AddPointer(Ops[i]);
2091 void *IP = 0;
2092 SCEVMulExpr *S =
2093 static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2094 if (!S) {
2095 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2096 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2097 S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
2098 O, Ops.size());
2099 UniqueSCEVs.InsertNode(S, IP);
2100 }
2101 S->setNoWrapFlags(Flags);
2102 return S;
2103 }
2104
2105 /// getUDivExpr - Get a canonical unsigned division expression, or something
2106 /// simpler if possible.
2107 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
2108 const SCEV *RHS) {
2109 assert(getEffectiveSCEVType(LHS->getType()) ==
2110 getEffectiveSCEVType(RHS->getType()) &&
2111 "SCEVUDivExpr operand types don't match!");
2112
2113 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
2114 if (RHSC->getValue()->equalsInt(1))
2115 return LHS; // X udiv 1 --> x
2116 // If the denominator is zero, the result of the udiv is undefined. Don't
2117 // try to analyze it, because the resolution chosen here may differ from
2118 // the resolution chosen in other parts of the compiler.
2119 if (!RHSC->getValue()->isZero()) {
2120       // Determine if the division can be folded into the operands of the
2121       // dividend.
2122 // TODO: Generalize this to non-constants by using known-bits information.
2123 Type *Ty = LHS->getType();
2124 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
2125 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ - 1;
2126 // For non-power-of-two values, effectively round the value up to the
2127 // nearest power of two.
2128 if (!RHSC->getValue()->getValue().isPowerOf2())
2129 ++MaxShiftAmt;
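           // MaxShiftAmt is now ceil(log2(RHS)), so ExtTy has just enough extra bits
           // of headroom for the zero-extension checks that guard the folds below.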
2130 IntegerType *ExtTy =
2131 IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
2132 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
2133 if (const SCEVConstant *Step =
2134 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this))) {
2135 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
2136 const APInt &StepInt = Step->getValue()->getValue();
2137 const APInt &DivInt = RHSC->getValue()->getValue();
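               // Comparing against the operand-wise zero-extended recurrence checks
               // that the recurrence cannot wrap in the wider type, so it is safe to
               // divide each operand separately.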
2138 if (!StepInt.urem(DivInt) &&
2139 getZeroExtendExpr(AR, ExtTy) ==
2140 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2141 getZeroExtendExpr(Step, ExtTy),
2142 AR->getLoop(), SCEV::FlagAnyWrap)) {
2143 SmallVector<const SCEV *, 4> Operands;
2144 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
2145 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
2146 return getAddRecExpr(Operands, AR->getLoop(),
2147 SCEV::FlagNW);
2148 }
2149           // Get a canonical UDivExpr for a recurrence.
2150           // {X,+,N}/C => {Y,+,N}/C where Y=X-(X%N). Safe when C%N=0.
2151 // We can currently only fold X%N if X is constant.
2152 const SCEVConstant *StartC = dyn_cast<SCEVConstant>(AR->getStart());
2153 if (StartC && !DivInt.urem(StepInt) &&
2154 getZeroExtendExpr(AR, ExtTy) ==
2155 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
2156 getZeroExtendExpr(Step, ExtTy),
2157 AR->getLoop(), SCEV::FlagAnyWrap)) {
2158 const APInt &StartInt = StartC->getValue()->getValue();
2159 const APInt &StartRem = StartInt.urem(StepInt);
2160 if (StartRem != 0)
2161 LHS = getAddRecExpr(getConstant(StartInt - StartRem), Step,
2162 AR->getLoop(), SCEV::FlagNW);
2163 }
2164 }
2165 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
2166 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
2167 SmallVector<const SCEV *, 4> Operands;
2168 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
2169 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
2170 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
2171 // Find an operand that's safely divisible.
2172 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
2173 const SCEV *Op = M->getOperand(i);
2174 const SCEV *Div = getUDivExpr(Op, RHSC);
2175 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
2176 Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
2177 M->op_end());
2178 Operands[i] = Div;
2179 return getMulExpr(Operands);
2180 }
2181 }
2182 }
2183 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
2184 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
2185 SmallVector<const SCEV *, 4> Operands;
2186 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
2187 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
2188 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
2189 Operands.clear();
2190 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2191 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
2192 if (isa<SCEVUDivExpr>(Op) ||
2193 getMulExpr(Op, RHS) != A->getOperand(i))
2194 break;
2195 Operands.push_back(Op);
2196 }
2197 if (Operands.size() == A->getNumOperands())
2198 return getAddExpr(Operands);
2199 }
2200 }
2201
2202 // Fold if both operands are constant.
2203 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
2204 Constant *LHSCV = LHSC->getValue();
2205 Constant *RHSCV = RHSC->getValue();
2206 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
2207 RHSCV)));
2208 }
2209 }
2210 }
2211
2212 FoldingSetNodeID ID;
2213 ID.AddInteger(scUDivExpr);
2214 ID.AddPointer(LHS);
2215 ID.AddPointer(RHS);
2216 void *IP = 0;
2217 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2218 SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
2219 LHS, RHS);
2220 UniqueSCEVs.InsertNode(S, IP);
2221 return S;
2222 }
2223
2224
2225 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2226 /// Simplify the expression as much as possible.
2227 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
2228 const Loop *L,
2229 SCEV::NoWrapFlags Flags) {
2230 SmallVector<const SCEV *, 4> Operands;
2231 Operands.push_back(Start);
2232 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
2233 if (StepChrec->getLoop() == L) {
2234 Operands.append(StepChrec->op_begin(), StepChrec->op_end());
2235 return getAddRecExpr(Operands, L, maskFlags(Flags, SCEV::FlagNW));
2236 }
2237
2238 Operands.push_back(Step);
2239 return getAddRecExpr(Operands, L, Flags);
2240 }
2241
2242 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
2243 /// Simplify the expression as much as possible.
2244 const SCEV *
2245 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
2246 const Loop *L, SCEV::NoWrapFlags Flags) {
2247 if (Operands.size() == 1) return Operands[0];
2248 #ifndef NDEBUG
2249 Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
2250 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
2251 assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
2252 "SCEVAddRecExpr operand types don't match!");
2253 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2254 assert(isLoopInvariant(Operands[i], L) &&
2255 "SCEVAddRecExpr operand is not loop-invariant!");
2256 #endif
2257
2258 if (Operands.back()->isZero()) {
2259 Operands.pop_back();
2260 return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
2261 }
2262
2263   // It's tempting to want to call getMaxBackedgeTakenCount here and
2264 // use that information to infer NUW and NSW flags. However, computing a
2265 // BE count requires calling getAddRecExpr, so we may not yet have a
2266 // meaningful BE count at this point (and if we don't, we'd be stuck
2267 // with a SCEVCouldNotCompute as the cached BE count).
2268
2269 // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
2270 // And vice-versa.
2271 int SignOrUnsignMask = SCEV::FlagNUW | SCEV::FlagNSW;
2272 SCEV::NoWrapFlags SignOrUnsignWrap = maskFlags(Flags, SignOrUnsignMask);
2273 if (SignOrUnsignWrap && (SignOrUnsignWrap != SignOrUnsignMask)) {
2274 bool All = true;
2275 for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
2276 E = Operands.end(); I != E; ++I)
2277 if (!isKnownNonNegative(*I)) {
2278 All = false;
2279 break;
2280 }
2281 if (All) Flags = setFlags(Flags, (SCEV::NoWrapFlags)SignOrUnsignMask);
2282 }
2283
2284   // Canonicalize nested AddRecs by nesting them in order of loop depth.
2285 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
2286 const Loop *NestedLoop = NestedAR->getLoop();
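         // Only swap when L is the outer loop: either L strictly contains NestedLoop,
         // or the loops are unrelated and L's header dominates NestedLoop's header.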
2287 if (L->contains(NestedLoop) ?
2288 (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
2289 (!NestedLoop->contains(L) &&
2290 DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
2291 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
2292 NestedAR->op_end());
2293 Operands[0] = NestedAR->getStart();
2294 // AddRecs require their operands be loop-invariant with respect to their
2295 // loops. Don't perform this transformation if it would break this
2296 // requirement.
2297 bool AllInvariant = true;
2298 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2299 if (!isLoopInvariant(Operands[i], L)) {
2300 AllInvariant = false;
2301 break;
2302 }
2303 if (AllInvariant) {
2304 // Create a recurrence for the outer loop with the same step size.
2305 //
2306 // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
2307 // inner recurrence has the same property.
2308 SCEV::NoWrapFlags OuterFlags =
2309 maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
2310
2311 NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
2312 AllInvariant = true;
2313 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
2314 if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
2315 AllInvariant = false;
2316 break;
2317 }
2318 if (AllInvariant) {
2319 // Ok, both add recurrences are valid after the transformation.
2320 //
2321 // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
2322 // the outer recurrence has the same property.
2323 SCEV::NoWrapFlags InnerFlags =
2324 maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
2325 return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
2326 }
2327 }
2328 // Reset Operands to its original state.
2329 Operands[0] = NestedAR;
2330 }
2331 }
2332
2333 // Okay, it looks like we really DO need an addrec expr. Check to see if we
2334 // already have one, otherwise create a new one.
2335 FoldingSetNodeID ID;
2336 ID.AddInteger(scAddRecExpr);
2337 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
2338 ID.AddPointer(Operands[i]);
2339 ID.AddPointer(L);
2340 void *IP = 0;
2341 SCEVAddRecExpr *S =
2342 static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
2343 if (!S) {
2344 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
2345 std::uninitialized_copy(Operands.begin(), Operands.end(), O);
2346 S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
2347 O, Operands.size(), L);
2348 UniqueSCEVs.InsertNode(S, IP);
2349 }
2350 S->setNoWrapFlags(Flags);
2351 return S;
2352 }
2353
2354 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
2355 const SCEV *RHS) {
2356 SmallVector<const SCEV *, 2> Ops;
2357 Ops.push_back(LHS);
2358 Ops.push_back(RHS);
2359 return getSMaxExpr(Ops);
2360 }
2361
2362 const SCEV *
2363 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2364 assert(!Ops.empty() && "Cannot get empty smax!");
2365 if (Ops.size() == 1) return Ops[0];
2366 #ifndef NDEBUG
2367 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2368 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2369 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2370 "SCEVSMaxExpr operand types don't match!");
2371 #endif
2372
2373   // Sort by complexity; this groups all similar expression types together.
2374 GroupByComplexity(Ops, LI);
2375
2376 // If there are any constants, fold them together.
2377 unsigned Idx = 0;
2378 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2379 ++Idx;
2380 assert(Idx < Ops.size());
2381 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2382 // We found two constants, fold them together!
2383 ConstantInt *Fold = ConstantInt::get(getContext(),
2384 APIntOps::smax(LHSC->getValue()->getValue(),
2385 RHSC->getValue()->getValue()));
2386 Ops[0] = getConstant(Fold);
2387 Ops.erase(Ops.begin()+1); // Erase the folded element
2388 if (Ops.size() == 1) return Ops[0];
2389 LHSC = cast<SCEVConstant>(Ops[0]);
2390 }
2391
2392 // If we are left with a constant minimum-int, strip it off.
2393 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
2394 Ops.erase(Ops.begin());
2395 --Idx;
2396 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
2397 // If we have an smax with a constant maximum-int, it will always be
2398 // maximum-int.
2399 return Ops[0];
2400 }
2401
2402 if (Ops.size() == 1) return Ops[0];
2403 }
2404
2405 // Find the first SMax
2406 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
2407 ++Idx;
2408
2409 // Check to see if one of the operands is an SMax. If so, expand its operands
2410 // onto our operand list, and recurse to simplify.
2411 if (Idx < Ops.size()) {
2412 bool DeletedSMax = false;
2413 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
2414 Ops.erase(Ops.begin()+Idx);
2415 Ops.append(SMax->op_begin(), SMax->op_end());
2416 DeletedSMax = true;
2417 }
2418
2419 if (DeletedSMax)
2420 return getSMaxExpr(Ops);
2421 }
2422
2423 // Okay, check to see if the same value occurs in the operand list twice. If
2424 // so, delete one. Since we sorted the list, these values are required to
2425 // be adjacent.
2426 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2427 // X smax Y smax Y --> X smax Y
2428 // X smax Y --> X, if X is always greater than Y
2429 if (Ops[i] == Ops[i+1] ||
2430 isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
2431 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2432 --i; --e;
2433 } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
2434 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2435 --i; --e;
2436 }
2437
2438 if (Ops.size() == 1) return Ops[0];
2439
2440 assert(!Ops.empty() && "Reduced smax down to nothing!");
2441
2442 // Okay, it looks like we really DO need an smax expr. Check to see if we
2443 // already have one, otherwise create a new one.
2444 FoldingSetNodeID ID;
2445 ID.AddInteger(scSMaxExpr);
2446 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2447 ID.AddPointer(Ops[i]);
2448 void *IP = 0;
2449 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2450 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2451 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2452 SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
2453 O, Ops.size());
2454 UniqueSCEVs.InsertNode(S, IP);
2455 return S;
2456 }
2457
2458 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
2459 const SCEV *RHS) {
2460 SmallVector<const SCEV *, 2> Ops;
2461 Ops.push_back(LHS);
2462 Ops.push_back(RHS);
2463 return getUMaxExpr(Ops);
2464 }
2465
2466 const SCEV *
2467 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
2468 assert(!Ops.empty() && "Cannot get empty umax!");
2469 if (Ops.size() == 1) return Ops[0];
2470 #ifndef NDEBUG
2471 Type *ETy = getEffectiveSCEVType(Ops[0]->getType());
2472 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
2473 assert(getEffectiveSCEVType(Ops[i]->getType()) == ETy &&
2474 "SCEVUMaxExpr operand types don't match!");
2475 #endif
2476
2477   // Sort by complexity; this groups all similar expression types together.
2478 GroupByComplexity(Ops, LI);
2479
2480 // If there are any constants, fold them together.
2481 unsigned Idx = 0;
2482 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
2483 ++Idx;
2484 assert(Idx < Ops.size());
2485 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
2486 // We found two constants, fold them together!
2487 ConstantInt *Fold = ConstantInt::get(getContext(),
2488 APIntOps::umax(LHSC->getValue()->getValue(),
2489 RHSC->getValue()->getValue()));
2490 Ops[0] = getConstant(Fold);
2491 Ops.erase(Ops.begin()+1); // Erase the folded element
2492 if (Ops.size() == 1) return Ops[0];
2493 LHSC = cast<SCEVConstant>(Ops[0]);
2494 }
2495
2496 // If we are left with a constant minimum-int, strip it off.
2497 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
2498 Ops.erase(Ops.begin());
2499 --Idx;
2500 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
2501 // If we have an umax with a constant maximum-int, it will always be
2502 // maximum-int.
2503 return Ops[0];
2504 }
2505
2506 if (Ops.size() == 1) return Ops[0];
2507 }
2508
2509 // Find the first UMax
2510 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
2511 ++Idx;
2512
2513 // Check to see if one of the operands is a UMax. If so, expand its operands
2514 // onto our operand list, and recurse to simplify.
2515 if (Idx < Ops.size()) {
2516 bool DeletedUMax = false;
2517 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
2518 Ops.erase(Ops.begin()+Idx);
2519 Ops.append(UMax->op_begin(), UMax->op_end());
2520 DeletedUMax = true;
2521 }
2522
2523 if (DeletedUMax)
2524 return getUMaxExpr(Ops);
2525 }
2526
2527 // Okay, check to see if the same value occurs in the operand list twice. If
2528 // so, delete one. Since we sorted the list, these values are required to
2529 // be adjacent.
2530 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
2531 // X umax Y umax Y --> X umax Y
2532 // X umax Y --> X, if X is always greater than Y
2533 if (Ops[i] == Ops[i+1] ||
2534 isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
2535 Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
2536 --i; --e;
2537 } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
2538 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
2539 --i; --e;
2540 }
2541
2542 if (Ops.size() == 1) return Ops[0];
2543
2544 assert(!Ops.empty() && "Reduced umax down to nothing!");
2545
2546 // Okay, it looks like we really DO need a umax expr. Check to see if we
2547 // already have one, otherwise create a new one.
2548 FoldingSetNodeID ID;
2549 ID.AddInteger(scUMaxExpr);
2550 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
2551 ID.AddPointer(Ops[i]);
2552 void *IP = 0;
2553 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
2554 const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
2555 std::uninitialized_copy(Ops.begin(), Ops.end(), O);
2556 SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
2557 O, Ops.size());
2558 UniqueSCEVs.InsertNode(S, IP);
2559 return S;
2560 }
2561
2562 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
2563 const SCEV *RHS) {
2564 // ~smax(~x, ~y) == smin(x, y).
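       // (~z = -1 - z reverses the signed order, so complementing the operands,
       //  taking smax, and complementing back yields the minimum.)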
2565 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2566 }
2567
2568 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
2569 const SCEV *RHS) {
2570 // ~umax(~x, ~y) == umin(x, y)
2571 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
2572 }
2573
2574 const SCEV *ScalarEvolution::getSizeOfExpr(Type *AllocTy) {
2575 // If we have TargetData, we can bypass creating a target-independent
2576 // constant expression and then folding it back into a ConstantInt.
2577 // This is just a compile-time optimization.
2578 if (TD)
2579 return getConstant(TD->getIntPtrType(getContext()),
2580 TD->getTypeAllocSize(AllocTy));
2581
2582 Constant *C = ConstantExpr::getSizeOf(AllocTy);
2583 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2584 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2585 C = Folded;
2586 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2587 return getTruncateOrZeroExtend(getSCEV(C), Ty);
2588 }
2589
2590 const SCEV *ScalarEvolution::getAlignOfExpr(Type *AllocTy) {
2591 Constant *C = ConstantExpr::getAlignOf(AllocTy);
2592 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2593 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2594 C = Folded;
2595 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
2596 return getTruncateOrZeroExtend(getSCEV(C), Ty);
2597 }
2598
2599 const SCEV *ScalarEvolution::getOffsetOfExpr(StructType *STy,
2600 unsigned FieldNo) {
2601 // If we have TargetData, we can bypass creating a target-independent
2602 // constant expression and then folding it back into a ConstantInt.
2603 // This is just a compile-time optimization.
2604 if (TD)
2605 return getConstant(TD->getIntPtrType(getContext()),
2606 TD->getStructLayout(STy)->getElementOffset(FieldNo));
2607
2608 Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
2609 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2610 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2611 C = Folded;
2612 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
2613 return getTruncateOrZeroExtend(getSCEV(C), Ty);
2614 }
2615
2616 const SCEV *ScalarEvolution::getOffsetOfExpr(Type *CTy,
2617 Constant *FieldNo) {
2618 Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
2619 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
2620 if (Constant *Folded = ConstantFoldConstantExpression(CE, TD))
2621 C = Folded;
2622 Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
2623 return getTruncateOrZeroExtend(getSCEV(C), Ty);
2624 }
2625
2626 const SCEV *ScalarEvolution::getUnknown(Value *V) {
2627 // Don't attempt to do anything other than create a SCEVUnknown object
2628 // here. createSCEV only calls getUnknown after checking for all other
2629 // interesting possibilities, and any other code that calls getUnknown
2630 // is doing so in order to hide a value from SCEV canonicalization.
2631
2632 FoldingSetNodeID ID;
2633 ID.AddInteger(scUnknown);
2634 ID.AddPointer(V);
2635 void *IP = 0;
2636 if (SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) {
2637 assert(cast<SCEVUnknown>(S)->getValue() == V &&
2638 "Stale SCEVUnknown in uniquing map!");
2639 return S;
2640 }
2641 SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V, this,
2642 FirstUnknown);
2643 FirstUnknown = cast<SCEVUnknown>(S);
2644 UniqueSCEVs.InsertNode(S, IP);
2645 return S;
2646 }
2647
2648 //===----------------------------------------------------------------------===//
2649 // Basic SCEV Analysis and PHI Idiom Recognition Code
2650 //
2651
2652 /// isSCEVable - Test if values of the given type are analyzable within
2653 /// the SCEV framework. This primarily includes integer types, and it
2654 /// can optionally include pointer types if the ScalarEvolution class
2655 /// has access to target-specific information.
2656 bool ScalarEvolution::isSCEVable(Type *Ty) const {
2657 // Integers and pointers are always SCEVable.
2658 return Ty->isIntegerTy() || Ty->isPointerTy();
2659 }
2660
2661 /// getTypeSizeInBits - Return the size in bits of the specified type,
2662 /// for which isSCEVable must return true.
2663 uint64_t ScalarEvolution::getTypeSizeInBits(Type *Ty) const {
2664 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2665
2666 // If we have a TargetData, use it!
2667 if (TD)
2668 return TD->getTypeSizeInBits(Ty);
2669
2670 // Integer types have fixed sizes.
2671 if (Ty->isIntegerTy())
2672 return Ty->getPrimitiveSizeInBits();
2673
2674   // The only other supported type is pointer. Without TargetData, conservatively
2675 // assume pointers are 64-bit.
2676 assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
2677 return 64;
2678 }
2679
2680 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2681 /// the given type and which represents how SCEV will treat the given
2682 /// type, for which isSCEVable must return true. For pointer types,
2683 /// this is the pointer-sized integer type.
2684 Type *ScalarEvolution::getEffectiveSCEVType(Type *Ty) const {
2685 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2686
2687 if (Ty->isIntegerTy())
2688 return Ty;
2689
2690   // The only other supported type is pointer.
2691 assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
2692 if (TD) return TD->getIntPtrType(getContext());
2693
2694 // Without TargetData, conservatively assume pointers are 64-bit.
2695 return Type::getInt64Ty(getContext());
2696 }
2697
2698 const SCEV *ScalarEvolution::getCouldNotCompute() {
2699 return &CouldNotCompute;
2700 }
2701
2702 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2703 /// expression and create a new one.
2704 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2705 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2706
2707 ValueExprMapType::const_iterator I = ValueExprMap.find(V);
2708 if (I != ValueExprMap.end()) return I->second;
2709 const SCEV *S = createSCEV(V);
2710
2711 // The process of creating a SCEV for V may have caused other SCEVs
2712 // to have been created, so it's necessary to insert the new entry
2713 // from scratch, rather than trying to remember the insert position
2714 // above.
2715 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2716 return S;
2717 }
2718
2719 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2720 ///
2721 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2722 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2723 return getConstant(
2724 cast<ConstantInt>(ConstantExpr::getNeg(VC->getValue())));
2725
2726 Type *Ty = V->getType();
2727 Ty = getEffectiveSCEVType(Ty);
2728 return getMulExpr(V,
2729 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty))));
2730 }
2731
2732 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
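     /// (equivalently: in two's complement, -1 - V flips every bit of V, so it
     /// equals ~V).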
2733 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2734 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2735 return getConstant(
2736 cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2737
2738 Type *Ty = V->getType();
2739 Ty = getEffectiveSCEVType(Ty);
2740 const SCEV *AllOnes =
2741 getConstant(cast<ConstantInt>(Constant::getAllOnesValue(Ty)));
2742 return getMinusSCEV(AllOnes, V);
2743 }
2744
2745 /// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
2746 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
2747 SCEV::NoWrapFlags Flags) {
2748 assert(!maskFlags(Flags, SCEV::FlagNUW) && "subtraction does not have NUW");
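       // Minus is modeled below as LHS + (-1)*RHS; an unsigned no-wrap guarantee on
       // the subtraction does not carry over to that negated form, which is why NUW
       // is rejected up front.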
2749
2750 // Fast path: X - X --> 0.
2751 if (LHS == RHS)
2752 return getConstant(LHS->getType(), 0);
2753
2754 // X - Y --> X + -Y
2755 return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
2756 }
2757
2758 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2759 /// input value to the specified type. If the type must be extended, it is zero
2760 /// extended.
2761 const SCEV *
2762 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, Type *Ty) {
2763 Type *SrcTy = V->getType();
2764 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2765 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2766 "Cannot truncate or zero extend with non-integer arguments!");
2767 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2768 return V; // No conversion
2769 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2770 return getTruncateExpr(V, Ty);
2771 return getZeroExtendExpr(V, Ty);
2772 }
2773
2774 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2775 /// input value to the specified type. If the type must be extended, it is sign
2776 /// extended.
2777 const SCEV *
2778 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2779 Type *Ty) {
2780 Type *SrcTy = V->getType();
2781 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2782 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2783          "Cannot truncate or sign extend with non-integer arguments!");
2784 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2785 return V; // No conversion
2786 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2787 return getTruncateExpr(V, Ty);
2788 return getSignExtendExpr(V, Ty);
2789 }
2790
2791 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2792 /// input value to the specified type. If the type must be extended, it is zero
2793 /// extended. The conversion must not be narrowing.
2794 const SCEV *
2795 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, Type *Ty) {
2796 Type *SrcTy = V->getType();
2797 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2798 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2799 "Cannot noop or zero extend with non-integer arguments!");
2800 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2801 "getNoopOrZeroExtend cannot truncate!");
2802 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2803 return V; // No conversion
2804 return getZeroExtendExpr(V, Ty);
2805 }
2806
2807 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2808 /// input value to the specified type. If the type must be extended, it is sign
2809 /// extended. The conversion must not be narrowing.
2810 const SCEV *
2811 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, Type *Ty) {
2812 Type *SrcTy = V->getType();
2813 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2814 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2815 "Cannot noop or sign extend with non-integer arguments!");
2816 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2817 "getNoopOrSignExtend cannot truncate!");
2818 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2819 return V; // No conversion
2820 return getSignExtendExpr(V, Ty);
2821 }
2822
2823 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2824 /// the input value to the specified type. If the type must be extended,
2825 /// it is extended with unspecified bits. The conversion must not be
2826 /// narrowing.
2827 const SCEV *
2828 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, Type *Ty) {
2829 Type *SrcTy = V->getType();
2830 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2831 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2832 "Cannot noop or any extend with non-integer arguments!");
2833 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2834 "getNoopOrAnyExtend cannot truncate!");
2835 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2836 return V; // No conversion
2837 return getAnyExtendExpr(V, Ty);
2838 }
2839
2840 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2841 /// input value to the specified type. The conversion must not be widening.
2842 const SCEV *
2843 ScalarEvolution::getTruncateOrNoop(const SCEV *V, Type *Ty) {
2844 Type *SrcTy = V->getType();
2845 assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
2846 (Ty->isIntegerTy() || Ty->isPointerTy()) &&
2847 "Cannot truncate or noop with non-integer arguments!");
2848 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2849 "getTruncateOrNoop cannot extend!");
2850 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2851 return V; // No conversion
2852 return getTruncateExpr(V, Ty);
2853 }
2854
2855 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2856 /// the types using zero-extension, and then perform a umax operation
2857 /// with them.
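/// For example (illustrative only): given an i64 LHS and an i32 RHS, the RHS
/// is zero-extended to i64 and the result is umax(LHS, (zext i32 RHS to i64)).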
2858 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2859 const SCEV *RHS) {
2860 const SCEV *PromotedLHS = LHS;
2861 const SCEV *PromotedRHS = RHS;
2862
2863 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2864 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2865 else
2866 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2867
2868 return getUMaxExpr(PromotedLHS, PromotedRHS);
2869 }
2870
2871 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2872 /// the types using zero-extension, and then perform a umin operation
2873 /// with them.
2874 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2875 const SCEV *RHS) {
2876 const SCEV *PromotedLHS = LHS;
2877 const SCEV *PromotedRHS = RHS;
2878
2879 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2880 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2881 else
2882 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2883
2884 return getUMinExpr(PromotedLHS, PromotedRHS);
2885 }
2886
2887 /// getPointerBase - Transitively follow the chain of pointer-type operands
2888 /// until reaching a SCEV that does not have a single pointer operand. This
2889 /// returns a SCEVUnknown pointer for well-formed pointer-type expressions,
2890 /// but corner cases do exist.
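/// For example (illustrative): for an expression such as (%object + {0,+,4}),
/// where %object is the only pointer-typed operand, this returns the
/// SCEVUnknown for %object.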
2891 const SCEV *ScalarEvolution::getPointerBase(const SCEV *V) {
2892 // A pointer operand may evaluate to a nonpointer expression, such as null.
2893 if (!V->getType()->isPointerTy())
2894 return V;
2895
2896 if (const SCEVCastExpr *Cast = dyn_cast<SCEVCastExpr>(V)) {
2897 return getPointerBase(Cast->getOperand());
2898 }
2899 else if (const SCEVNAryExpr *NAry = dyn_cast<SCEVNAryExpr>(V)) {
2900 const SCEV *PtrOp = 0;
2901 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
2902 I != E; ++I) {
2903 if ((*I)->getType()->isPointerTy()) {
2904 // Cannot find the base of an expression with multiple pointer operands.
2905 if (PtrOp)
2906 return V;
2907 PtrOp = *I;
2908 }
2909 }
2910 if (!PtrOp)
2911 return V;
2912 return getPointerBase(PtrOp);
2913 }
2914 return V;
2915 }
2916
2917 /// PushDefUseChildren - Push users of the given Instruction
2918 /// onto the given Worklist.
2919 static void
2920 PushDefUseChildren(Instruction *I,
2921 SmallVectorImpl<Instruction *> &Worklist) {
2922 // Push the def-use children onto the Worklist stack.
2923 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2924 UI != UE; ++UI)
2925 Worklist.push_back(cast<Instruction>(*UI));
2926 }
2927
2928 /// ForgetSymbolicName - This looks up computed SCEV values for all
2929 /// instructions that depend on the given instruction and removes them from
2930 /// the ValueExprMap if they reference SymName. This is used during PHI
2931 /// resolution.
2932 void
2933 ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
2934 SmallVector<Instruction *, 16> Worklist;
2935 PushDefUseChildren(PN, Worklist);
2936
2937 SmallPtrSet<Instruction *, 8> Visited;
2938 Visited.insert(PN);
2939 while (!Worklist.empty()) {
2940 Instruction *I = Worklist.pop_back_val();
2941 if (!Visited.insert(I)) continue;
2942
2943 ValueExprMapType::iterator It =
2944 ValueExprMap.find(static_cast<Value *>(I));
2945 if (It != ValueExprMap.end()) {
2946 const SCEV *Old = It->second;
2947
2948 // Short-circuit the def-use traversal if the symbolic name
2949 // ceases to appear in expressions.
2950 if (Old != SymName && !hasOperand(Old, SymName))
2951 continue;
2952
2953 // SCEVUnknown for a PHI either means that it has an unrecognized
2954 // structure, it's a PHI that's in the process of being computed
2955 // by createNodeForPHI, or it's a single-value PHI. In the first case,
2956 // additional loop trip count information isn't going to change anything.
2957 // In the second case, createNodeForPHI will perform the necessary
2958 // updates on its own when it gets to that point. In the third, we do
2959 // want to forget the SCEVUnknown.
2960 if (!isa<PHINode>(I) ||
2961 !isa<SCEVUnknown>(Old) ||
2962 (I != PN && Old == SymName)) {
2963 forgetMemoizedResults(Old);
2964 ValueExprMap.erase(It);
2965 }
2966 }
2967
2968 PushDefUseChildren(I, Worklist);
2969 }
2970 }
2971
2972 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
2973 /// a loop header, making it a potential recurrence, or it doesn't.
2974 ///
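/// For example (illustrative IR; the value names are hypothetical), the
/// loop-header PHI
///   %i      = phi i64 [ 0, %entry ], [ %i.next, %loop ]
///   %i.next = add nuw nsw i64 %i, 1
/// is recognized below as the affine addrec {0,+,1}<nuw><nsw><%loop>, because
/// the backedge value is the symbolic PHI plus a loop-invariant step.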
2975 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2976 if (const Loop *L = LI->getLoopFor(PN->getParent()))
2977 if (L->getHeader() == PN->getParent()) {
2978 // The loop may have multiple entrances or multiple exits; we can analyze
2979 // this phi as an addrec if it has a unique entry value and a unique
2980 // backedge value.
2981 Value *BEValueV = 0, *StartValueV = 0;
2982 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2983 Value *V = PN->getIncomingValue(i);
2984 if (L->contains(PN->getIncomingBlock(i))) {
2985 if (!BEValueV) {
2986 BEValueV = V;
2987 } else if (BEValueV != V) {
2988 BEValueV = 0;
2989 break;
2990 }
2991 } else if (!StartValueV) {
2992 StartValueV = V;
2993 } else if (StartValueV != V) {
2994 StartValueV = 0;
2995 break;
2996 }
2997 }
2998 if (BEValueV && StartValueV) {
2999 // While we are analyzing this PHI node, handle its value symbolically.
3000 const SCEV *SymbolicName = getUnknown(PN);
3001 assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
3002 "PHI node already processed?");
3003 ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
3004
3005 // Using this symbolic name for the PHI, analyze the value coming around
3006 // the back-edge.
3007 const SCEV *BEValue = getSCEV(BEValueV);
3008
3009 // NOTE: If BEValue is loop invariant, we know that the PHI node just
3010 // has a special value for the first iteration of the loop.
3011
3012 // If the value coming around the backedge is an add with the symbolic
3013 // value we just inserted, then we found a simple induction variable!
3014 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
3015 // If there is a single occurrence of the symbolic value, replace it
3016 // with a recurrence.
3017 unsigned FoundIndex = Add->getNumOperands();
3018 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3019 if (Add->getOperand(i) == SymbolicName)
3020 if (FoundIndex == e) {
3021 FoundIndex = i;
3022 break;
3023 }
3024
3025 if (FoundIndex != Add->getNumOperands()) {
3026 // Create an add with everything but the specified operand.
3027 SmallVector<const SCEV *, 8> Ops;
3028 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
3029 if (i != FoundIndex)
3030 Ops.push_back(Add->getOperand(i));
3031 const SCEV *Accum = getAddExpr(Ops);
3032
3033 // This is not a valid addrec if the step amount is varying each
3034 // loop iteration, but is not itself an addrec in this loop.
3035 if (isLoopInvariant(Accum, L) ||
3036 (isa<SCEVAddRecExpr>(Accum) &&
3037 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
3038 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
3039
3040 // If the increment doesn't overflow, then neither the addrec nor
3041 // the post-increment will overflow.
3042 if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
3043 if (OBO->hasNoUnsignedWrap())
3044 Flags = setFlags(Flags, SCEV::FlagNUW);
3045 if (OBO->hasNoSignedWrap())
3046 Flags = setFlags(Flags, SCEV::FlagNSW);
3047 } else if (const GEPOperator *GEP =
3048 dyn_cast<GEPOperator>(BEValueV)) {
3049 // If the increment is an inbounds GEP, then we know the address
3050 // space cannot be wrapped around. We cannot make any guarantee
3051 // about signed or unsigned overflow because pointers are
3052 // unsigned but we may have a negative index from the base
3053 // pointer.
3054 if (GEP->isInBounds())
3055 Flags = setFlags(Flags, SCEV::FlagNW);
3056 }
3057
3058 const SCEV *StartVal = getSCEV(StartValueV);
3059 const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
3060
3061 // Since the no-wrap flags are on the increment, they apply to the
3062 // post-incremented value as well.
3063 if (isLoopInvariant(Accum, L))
3064 (void)getAddRecExpr(getAddExpr(StartVal, Accum),
3065 Accum, L, Flags);
3066
3067 // Okay, for the entire analysis of this edge we assumed the PHI
3068 // to be symbolic. We now need to go back and purge all of the
3069 // entries for the scalars that use the symbolic expression.
3070 ForgetSymbolicName(PN, SymbolicName);
3071 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3072 return PHISCEV;
3073 }
3074 }
3075 } else if (const SCEVAddRecExpr *AddRec =
3076 dyn_cast<SCEVAddRecExpr>(BEValue)) {
3077 // Otherwise, this could be a loop like this:
3078 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
3079 // In this case, j = {1,+,1} and BEValue is j.
3080 // Because the other in-value of i (0) fits the evolution of BEValue,
3081 // i really is an addrec evolution.
3082 if (AddRec->getLoop() == L && AddRec->isAffine()) {
3083 const SCEV *StartVal = getSCEV(StartValueV);
3084
3085 // If StartVal = j.start - j.stride, we can use StartVal as the
3086 // initial value of the addrec evolution.
3087 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
3088 AddRec->getOperand(1))) {
3089 // FIXME: For constant StartVal, we should be able to infer
3090 // no-wrap flags.
3091 const SCEV *PHISCEV =
3092 getAddRecExpr(StartVal, AddRec->getOperand(1), L,
3093 SCEV::FlagAnyWrap);
3094
3095 // Okay, for the entire analysis of this edge we assumed the PHI
3096 // to be symbolic. We now need to go back and purge all of the
3097 // entries for the scalars that use the symbolic expression.
3098 ForgetSymbolicName(PN, SymbolicName);
3099 ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
3100 return PHISCEV;
3101 }
3102 }
3103 }
3104 }
3105 }
3106
3107 // If the PHI has a single incoming value, follow that value, unless the
3108 // PHI's incoming blocks are in a different loop, in which case doing so
3109 // risks breaking LCSSA form. Instcombine would normally zap these, but
3110 // it doesn't have DominatorTree information, so it may miss cases.
3111 if (Value *V = SimplifyInstruction(PN, TD, DT))
3112 if (LI->replacementPreservesLCSSAForm(PN, V))
3113 return getSCEV(V);
3114
3115 // If it's not a loop phi, we can't handle it yet.
3116 return getUnknown(PN);
3117 }
3118
3119 /// createNodeForGEP - Expand GEP instructions into add and multiply
3120 /// operations. This allows them to be analyzed by regular SCEV code.
3121 ///
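/// For example (illustrative; assumes a 64-bit pointer size), the GEP
///   %p = getelementptr inbounds i32* %base, i64 %i
/// expands to (%base + (4 * %i)), with nsw on the multiply and on the final
/// add because the GEP is inbounds.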
3122 const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
3123
3124 // Don't blindly transfer the inbounds flag from the GEP instruction to the
3125 // Add expression, because the Instruction may be guarded by control flow
3126 // and the no-overflow bits may not be valid for the expression in any
3127 // context.
3128 bool isInBounds = GEP->isInBounds();
3129
3130 Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
3131 Value *Base = GEP->getOperand(0);
3132 // Don't attempt to analyze GEPs over unsized objects.
3133 if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
3134 return getUnknown(GEP);
3135 const SCEV *TotalOffset = getConstant(IntPtrTy, 0);
3136 gep_type_iterator GTI = gep_type_begin(GEP);
3137 for (GetElementPtrInst::op_iterator I = llvm::next(GEP->op_begin()),
3138 E = GEP->op_end();
3139 I != E; ++I) {
3140 Value *Index = *I;
3141 // Compute the (potentially symbolic) offset in bytes for this index.
3142 if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
3143 // For a struct, add the member offset.
3144 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
3145 const SCEV *FieldOffset = getOffsetOfExpr(STy, FieldNo);
3146
3147 // Add the field offset to the running total offset.
3148 TotalOffset = getAddExpr(TotalOffset, FieldOffset);
3149 } else {
3150 // For an array, add the element offset, explicitly scaled.
3151 const SCEV *ElementSize = getSizeOfExpr(*GTI);
3152 const SCEV *IndexS = getSCEV(Index);
3153 // Getelementptr indices are signed.
3154 IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
3155
3156 // Multiply the index by the element size to compute the element offset.
3157 const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize,
3158 isInBounds ? SCEV::FlagNSW :
3159 SCEV::FlagAnyWrap);
3160
3161 // Add the element offset to the running total offset.
3162 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
3163 }
3164 }
3165
3166 // Get the SCEV for the GEP base.
3167 const SCEV *BaseS = getSCEV(Base);
3168
3169 // Add the total offset from all the GEP indices to the base.
3170 return getAddExpr(BaseS, TotalOffset,
3171 isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
3172 }
3173
3174 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
3175 /// guaranteed to end in (at every loop iteration). It is, at the same time,
3176 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
3177 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
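/// For a product the counts add: e.g. for (8 * %x) the result is
/// 3 + GetMinTrailingZeros(%x), capped at the bit width (illustrative).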
3178 uint32_t
3179 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
3180 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3181 return C->getValue()->getValue().countTrailingZeros();
3182
3183 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
3184 return std::min(GetMinTrailingZeros(T->getOperand()),
3185 (uint32_t)getTypeSizeInBits(T->getType()));
3186
3187 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
3188 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3189 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3190 getTypeSizeInBits(E->getType()) : OpRes;
3191 }
3192
3193 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
3194 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
3195 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
3196 getTypeSizeInBits(E->getType()) : OpRes;
3197 }
3198
3199 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
3200 // The result is the min of all operands' results.
3201 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3202 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3203 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3204 return MinOpRes;
3205 }
3206
3207 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
3208 // The result is the sum of all operands' results.
3209 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
3210 uint32_t BitWidth = getTypeSizeInBits(M->getType());
3211 for (unsigned i = 1, e = M->getNumOperands();
3212 SumOpRes != BitWidth && i != e; ++i)
3213 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
3214 BitWidth);
3215 return SumOpRes;
3216 }
3217
3218 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
3219 // The result is the min of all operands' results.
3220 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
3221 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
3222 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
3223 return MinOpRes;
3224 }
3225
3226 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
3227 // The result is the min of all operands' results.
3228 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3229 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3230 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3231 return MinOpRes;
3232 }
3233
3234 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
3235 // The result is the min of all operands' results.
3236 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
3237 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
3238 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
3239 return MinOpRes;
3240 }
3241
3242 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3243 // For a SCEVUnknown, ask ValueTracking.
3244 unsigned BitWidth = getTypeSizeInBits(U->getType());
3245 APInt Mask = APInt::getAllOnesValue(BitWidth);
3246 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3247 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones);
3248 return Zeros.countTrailingOnes();
3249 }
3250
3251 // SCEVUDivExpr
3252 return 0;
3253 }
3254
3255 /// getUnsignedRange - Determine the unsigned range for a particular SCEV.
3256 ///
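/// For example (illustrative, assuming nothing is known about %x), the
/// unsigned range of (zext i8 %x to i32) is [0, 256).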
3257 ConstantRange
3258 ScalarEvolution::getUnsignedRange(const SCEV *S) {
3259 // See if we've computed this range already.
3260 DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
3261 if (I != UnsignedRanges.end())
3262 return I->second;
3263
3264 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3265 return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
3266
3267 unsigned BitWidth = getTypeSizeInBits(S->getType());
3268 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3269
3270 // If the value has known zeros, the maximum unsigned value will have those
3271 // known zeros as well.
3272 uint32_t TZ = GetMinTrailingZeros(S);
3273 if (TZ != 0)
3274 ConservativeResult =
3275 ConstantRange(APInt::getMinValue(BitWidth),
3276 APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
3277
3278 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3279 ConstantRange X = getUnsignedRange(Add->getOperand(0));
3280 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3281 X = X.add(getUnsignedRange(Add->getOperand(i)));
3282 return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
3283 }
3284
3285 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3286 ConstantRange X = getUnsignedRange(Mul->getOperand(0));
3287 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3288 X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
3289 return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
3290 }
3291
3292 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3293 ConstantRange X = getUnsignedRange(SMax->getOperand(0));
3294 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3295 X = X.smax(getUnsignedRange(SMax->getOperand(i)));
3296 return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
3297 }
3298
3299 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3300 ConstantRange X = getUnsignedRange(UMax->getOperand(0));
3301 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3302 X = X.umax(getUnsignedRange(UMax->getOperand(i)));
3303 return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
3304 }
3305
3306 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3307 ConstantRange X = getUnsignedRange(UDiv->getLHS());
3308 ConstantRange Y = getUnsignedRange(UDiv->getRHS());
3309 return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3310 }
3311
3312 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3313 ConstantRange X = getUnsignedRange(ZExt->getOperand());
3314 return setUnsignedRange(ZExt,
3315 ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3316 }
3317
3318 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3319 ConstantRange X = getUnsignedRange(SExt->getOperand());
3320 return setUnsignedRange(SExt,
3321 ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3322 }
3323
3324 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3325 ConstantRange X = getUnsignedRange(Trunc->getOperand());
3326 return setUnsignedRange(Trunc,
3327 ConservativeResult.intersectWith(X.truncate(BitWidth)));
3328 }
3329
3330 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3331 // If there's no unsigned wrap, the value will never be less than its
3332 // initial value.
3333 if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
3334 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
3335 if (!C->getValue()->isZero())
3336 ConservativeResult =
3337 ConservativeResult.intersectWith(
3338 ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0)));
3339
3340 // TODO: non-affine addrec
3341 if (AddRec->isAffine()) {
3342 Type *Ty = AddRec->getType();
3343 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3344 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3345 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3346 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3347
3348 const SCEV *Start = AddRec->getStart();
3349 const SCEV *Step = AddRec->getStepRecurrence(*this);
3350
3351 ConstantRange StartRange = getUnsignedRange(Start);
3352 ConstantRange StepRange = getSignedRange(Step);
3353 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3354 ConstantRange EndRange =
3355 StartRange.add(MaxBECountRange.multiply(StepRange));
3356
3357 // Check for overflow. This must be done with ConstantRange arithmetic
3358 // because we could be called from within the ScalarEvolution overflow
3359 // checking code.
3360 ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
3361 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3362 ConstantRange ExtMaxBECountRange =
3363 MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3364 ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
3365 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3366 ExtEndRange)
3367 return setUnsignedRange(AddRec, ConservativeResult);
3368
3369 APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
3370 EndRange.getUnsignedMin());
3371 APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
3372 EndRange.getUnsignedMax());
3373 if (Min.isMinValue() && Max.isMaxValue())
3374 return setUnsignedRange(AddRec, ConservativeResult);
3375 return setUnsignedRange(AddRec,
3376 ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3377 }
3378 }
3379
3380 return setUnsignedRange(AddRec, ConservativeResult);
3381 }
3382
3383 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3384 // For a SCEVUnknown, ask ValueTracking.
3385 APInt Mask = APInt::getAllOnesValue(BitWidth);
3386 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
3387 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
3388 if (Ones == ~Zeros + 1)
3389 return setUnsignedRange(U, ConservativeResult);
3390 return setUnsignedRange(U,
3391 ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
3392 }
3393
3394 return setUnsignedRange(S, ConservativeResult);
3395 }
3396
3397 /// getSignedRange - Determine the signed range for a particular SCEV.
3398 ///
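/// For example (illustrative, assuming nothing is known about %x), the
/// signed range of (sext i8 %x to i32) is [-128, 128).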
3399 ConstantRange
3400 ScalarEvolution::getSignedRange(const SCEV *S) {
3401 // See if we've computed this range already.
3402 DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
3403 if (I != SignedRanges.end())
3404 return I->second;
3405
3406 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
3407 return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
3408
3409 unsigned BitWidth = getTypeSizeInBits(S->getType());
3410 ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
3411
3412 // If the value has known zeros, the maximum signed value will have those
3413 // known zeros as well.
3414 uint32_t TZ = GetMinTrailingZeros(S);
3415 if (TZ != 0)
3416 ConservativeResult =
3417 ConstantRange(APInt::getSignedMinValue(BitWidth),
3418 APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
3419
3420 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
3421 ConstantRange X = getSignedRange(Add->getOperand(0));
3422 for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
3423 X = X.add(getSignedRange(Add->getOperand(i)));
3424 return setSignedRange(Add, ConservativeResult.intersectWith(X));
3425 }
3426
3427 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
3428 ConstantRange X = getSignedRange(Mul->getOperand(0));
3429 for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
3430 X = X.multiply(getSignedRange(Mul->getOperand(i)));
3431 return setSignedRange(Mul, ConservativeResult.intersectWith(X));
3432 }
3433
3434 if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
3435 ConstantRange X = getSignedRange(SMax->getOperand(0));
3436 for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
3437 X = X.smax(getSignedRange(SMax->getOperand(i)));
3438 return setSignedRange(SMax, ConservativeResult.intersectWith(X));
3439 }
3440
3441 if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
3442 ConstantRange X = getSignedRange(UMax->getOperand(0));
3443 for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
3444 X = X.umax(getSignedRange(UMax->getOperand(i)));
3445 return setSignedRange(UMax, ConservativeResult.intersectWith(X));
3446 }
3447
3448 if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
3449 ConstantRange X = getSignedRange(UDiv->getLHS());
3450 ConstantRange Y = getSignedRange(UDiv->getRHS());
3451 return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
3452 }
3453
3454 if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
3455 ConstantRange X = getSignedRange(ZExt->getOperand());
3456 return setSignedRange(ZExt,
3457 ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
3458 }
3459
3460 if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
3461 ConstantRange X = getSignedRange(SExt->getOperand());
3462 return setSignedRange(SExt,
3463 ConservativeResult.intersectWith(X.signExtend(BitWidth)));
3464 }
3465
3466 if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
3467 ConstantRange X = getSignedRange(Trunc->getOperand());
3468 return setSignedRange(Trunc,
3469 ConservativeResult.intersectWith(X.truncate(BitWidth)));
3470 }
3471
3472 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
3473 // If there's no signed wrap, and all the operands have the same sign or
3474 // zero, the value won't ever change sign.
3475 if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
3476 bool AllNonNeg = true;
3477 bool AllNonPos = true;
3478 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
3479 if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
3480 if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
3481 }
3482 if (AllNonNeg)
3483 ConservativeResult = ConservativeResult.intersectWith(
3484 ConstantRange(APInt(BitWidth, 0),
3485 APInt::getSignedMinValue(BitWidth)));
3486 else if (AllNonPos)
3487 ConservativeResult = ConservativeResult.intersectWith(
3488 ConstantRange(APInt::getSignedMinValue(BitWidth),
3489 APInt(BitWidth, 1)));
3490 }
3491
3492 // TODO: non-affine addrec
3493 if (AddRec->isAffine()) {
3494 Type *Ty = AddRec->getType();
3495 const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
3496 if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
3497 getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
3498 MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
3499
3500 const SCEV *Start = AddRec->getStart();
3501 const SCEV *Step = AddRec->getStepRecurrence(*this);
3502
3503 ConstantRange StartRange = getSignedRange(Start);
3504 ConstantRange StepRange = getSignedRange(Step);
3505 ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
3506 ConstantRange EndRange =
3507 StartRange.add(MaxBECountRange.multiply(StepRange));
3508
3509 // Check for overflow. This must be done with ConstantRange arithmetic
3510 // because we could be called from within the ScalarEvolution overflow
3511 // checking code.
3512 ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
3513 ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
3514 ConstantRange ExtMaxBECountRange =
3515 MaxBECountRange.zextOrTrunc(BitWidth*2+1);
3516 ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
3517 if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
3518 ExtEndRange)
3519 return setSignedRange(AddRec, ConservativeResult);
3520
3521 APInt Min = APIntOps::smin(StartRange.getSignedMin(),
3522 EndRange.getSignedMin());
3523 APInt Max = APIntOps::smax(StartRange.getSignedMax(),
3524 EndRange.getSignedMax());
3525 if (Min.isMinSignedValue() && Max.isMaxSignedValue())
3526 return setSignedRange(AddRec, ConservativeResult);
3527 return setSignedRange(AddRec,
3528 ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
3529 }
3530 }
3531
3532 return setSignedRange(AddRec, ConservativeResult);
3533 }
3534
3535 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
3536 // For a SCEVUnknown, ask ValueTracking.
3537 if (!U->getValue()->getType()->isIntegerTy() && !TD)
3538 return setSignedRange(U, ConservativeResult);
3539 unsigned NS = ComputeNumSignBits(U->getValue(), TD);
3540 if (NS == 1)
3541 return setSignedRange(U, ConservativeResult);
3542 return setSignedRange(U, ConservativeResult.intersectWith(
3543 ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
3544 APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
3545 }
3546
3547 return setSignedRange(S, ConservativeResult);
3548 }
3549
3550 /// createSCEV - We know that there is no SCEV for the specified value.
3551 /// Analyze the expression.
3552 ///
3553 const SCEV *ScalarEvolution::createSCEV(Value *V) {
3554 if (!isSCEVable(V->getType()))
3555 return getUnknown(V);
3556
3557 unsigned Opcode = Instruction::UserOp1;
3558 if (Instruction *I = dyn_cast<Instruction>(V)) {
3559 Opcode = I->getOpcode();
3560
3561 // Don't attempt to analyze instructions in blocks that aren't
3562 // reachable. Such instructions don't matter, and they aren't required
3563 // to obey basic rules for definitions dominating uses which this
3564 // analysis depends on.
3565 if (!DT->isReachableFromEntry(I->getParent()))
3566 return getUnknown(V);
3567 } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
3568 Opcode = CE->getOpcode();
3569 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
3570 return getConstant(CI);
3571 else if (isa<ConstantPointerNull>(V))
3572 return getConstant(V->getType(), 0);
3573 else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
3574 return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
3575 else
3576 return getUnknown(V);
3577
3578 Operator *U = cast<Operator>(V);
3579 switch (Opcode) {
3580 case Instruction::Add: {
3581 // The simple thing to do would be to just call getSCEV on both operands
3582 // and call getAddExpr with the result. However if we're looking at a
3583 // bunch of things all added together, this can be quite inefficient,
3584 // because it leads to N-1 getAddExpr calls for N ultimate operands.
3585 // Instead, gather up all the operands and make a single getAddExpr call.
3586 // LLVM IR canonical form means we need only traverse the left operands.
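// For example (illustrative IR), analyzing %t2 in
//   %t0 = add i32 %a, %b
//   %t1 = add i32 %t0, %c
//   %t2 = add i32 %t1, %d
// gathers {%d, %c, %b, %a} and issues a single getAddExpr call.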
3587 SmallVector<const SCEV *, 4> AddOps;
3588 AddOps.push_back(getSCEV(U->getOperand(1)));
3589 for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
3590 unsigned Opcode = Op->getValueID() - Value::InstructionVal;
3591 if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
3592 break;
3593 U = cast<Operator>(Op);
3594 const SCEV *Op1 = getSCEV(U->getOperand(1));
3595 if (Opcode == Instruction::Sub)
3596 AddOps.push_back(getNegativeSCEV(Op1));
3597 else
3598 AddOps.push_back(Op1);
3599 }
3600 AddOps.push_back(getSCEV(U->getOperand(0)));
3601 SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
3602 OverflowingBinaryOperator *OBO = cast<OverflowingBinaryOperator>(V);
3603 if (OBO->hasNoSignedWrap())
3604 Flags = setFlags(Flags, SCEV::FlagNSW);
3605 if (OBO->hasNoUnsignedWrap())
3606 Flags = setFlags(Flags, SCEV::FlagNUW);
3607 return getAddExpr(AddOps, Flags);
3608 }
3609 case Instruction::Mul: {
3610 // See the Add code above.
3611 SmallVector<const SCEV *, 4> MulOps;
3612 MulOps.push_back(getSCEV(U->getOperand(1)));
3613 for (Value *Op = U->getOperand(0);
3614 Op->getValueID() == Instruction::Mul + Value::InstructionVal;
3615 Op = U->getOperand(0)) {
3616 U = cast<Operator>(Op);
3617 MulOps.push_back(getSCEV(U->getOperand(1)));
3618 }
3619 MulOps.push_back(getSCEV(U->getOperand(0)));
3620 return getMulExpr(MulOps);
3621 }
3622 case Instruction::UDiv:
3623 return getUDivExpr(getSCEV(U->getOperand(0)),
3624 getSCEV(U->getOperand(1)));
3625 case Instruction::Sub:
3626 return getMinusSCEV(getSCEV(U->getOperand(0)),
3627 getSCEV(U->getOperand(1)));
3628 case Instruction::And:
3629 // For an expression like x&255 that merely masks off the high bits,
3630 // use zext(trunc(x)) as the SCEV expression.
3631 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3632 if (CI->isNullValue())
3633 return getSCEV(U->getOperand(1));
3634 if (CI->isAllOnesValue())
3635 return getSCEV(U->getOperand(0));
3636 const APInt &A = CI->getValue();
3637
3638 // Instcombine's ShrinkDemandedConstant may strip bits out of
3639 // constants, obscuring what would otherwise be a low-bits mask.
3640 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
3641 // knew about to reconstruct a low-bits mask value.
3642 unsigned LZ = A.countLeadingZeros();
3643 unsigned BitWidth = A.getBitWidth();
3644 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
3645 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
3646 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
3647
3648 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
3649
3650 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
3651 return
3652 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
3653 IntegerType::get(getContext(), BitWidth - LZ)),
3654 U->getType());
3655 }
3656 break;
3657
3658 case Instruction::Or:
3659 // If the RHS of the Or is a constant, we may have something like:
3660 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
3661 // optimizations will transparently handle this case.
3662 //
3663 // In order for this transformation to be safe, the LHS must be of the
3664 // form X*(2^n) and the Or constant must be less than 2^n.
3665 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3666 const SCEV *LHS = getSCEV(U->getOperand(0));
3667 const APInt &CIVal = CI->getValue();
3668 if (GetMinTrailingZeros(LHS) >=
3669 (CIVal.getBitWidth() - CIVal.countLeadingZeros())) {
3670 // Build a plain add SCEV.
3671 const SCEV *S = getAddExpr(LHS, getSCEV(CI));
3672 // If the LHS of the add was an addrec and it has no-wrap flags,
3673 // transfer the no-wrap flags, since an or won't introduce a wrap.
3674 if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
3675 const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
3676 const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
3677 OldAR->getNoWrapFlags());
3678 }
3679 return S;
3680 }
3681 }
3682 break;
3683 case Instruction::Xor:
3684 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
3685 // If the RHS of the xor is a signbit, then this is just an add.
3686 // Instcombine turns add of signbit into xor as a strength reduction step.
3687 if (CI->getValue().isSignBit())
3688 return getAddExpr(getSCEV(U->getOperand(0)),
3689 getSCEV(U->getOperand(1)));
3690
3691 // If the RHS of xor is -1, then this is a not operation.
3692 if (CI->isAllOnesValue())
3693 return getNotSCEV(getSCEV(U->getOperand(0)));
3694
3695 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
3696 // This is a variant of the check for xor with -1, and it handles
3697 // the case where instcombine has trimmed non-demanded bits out
3698 // of an xor with -1.
3699 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
3700 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
3701 if (BO->getOpcode() == Instruction::And &&
3702 LCI->getValue() == CI->getValue())
3703 if (const SCEVZeroExtendExpr *Z =
3704 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
3705 Type *UTy = U->getType();
3706 const SCEV *Z0 = Z->getOperand();
3707 Type *Z0Ty = Z0->getType();
3708 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
3709
3710 // If C is a low-bits mask, the zero extend is serving to
3711 // mask off the high bits. Complement the operand and
3712 // re-apply the zext.
3713 if (APIntOps::isMask(Z0TySize, CI->getValue()))
3714 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
3715
3716 // If C is a single bit, it may be in the sign-bit position
3717 // before the zero-extend. In this case, represent the xor
3718 // using an add, which is equivalent, and re-apply the zext.
3719 APInt Trunc = CI->getValue().trunc(Z0TySize);
3720 if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
3721 Trunc.isSignBit())
3722 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
3723 UTy);
3724 }
3725 }
3726 break;
3727
3728 case Instruction::Shl:
3729 // Turn shift left of a constant amount into a multiply.
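// For example (illustrative), (%x << 3) becomes (8 * %x).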
3730 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3731 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3732
3733 // If the shift count is not less than the bitwidth, the result of
3734 // the shift is undefined. Don't try to analyze it, because the
3735 // resolution chosen here may differ from the resolution chosen in
3736 // other parts of the compiler.
3737 if (SA->getValue().uge(BitWidth))
3738 break;
3739
3740 Constant *X = ConstantInt::get(getContext(),
3741 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3742 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3743 }
3744 break;
3745
3746 case Instruction::LShr:
3747 // Turn logical shift right of a constant into an unsigned divide.
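// For example (illustrative), (%x lshr 3) becomes (%x /u 8).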
3748 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
3749 uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
3750
3751 // If the shift count is not less than the bitwidth, the result of
3752 // the shift is undefined. Don't try to analyze it, because the
3753 // resolution chosen here may differ from the resolution chosen in
3754 // other parts of the compiler.
3755 if (SA->getValue().uge(BitWidth))
3756 break;
3757
3758 Constant *X = ConstantInt::get(getContext(),
3759 APInt(BitWidth, 1).shl(SA->getZExtValue()));
3760 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
3761 }
3762 break;
3763
3764 case Instruction::AShr:
3765 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
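// For example (illustrative), for an i32 %x the pair ((%x shl 24) ashr 24)
// becomes (sext i8 (trunc i32 %x to i8) to i32).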
3766 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
3767 if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
3768 if (L->getOpcode() == Instruction::Shl &&
3769 L->getOperand(1) == U->getOperand(1)) {
3770 uint64_t BitWidth = getTypeSizeInBits(U->getType());
3771
3772 // If the shift count is not less than the bitwidth, the result of
3773 // the shift is undefined. Don't try to analyze it, because the
3774 // resolution chosen here may differ from the resolution chosen in
3775 // other parts of the compiler.
3776 if (CI->getValue().uge(BitWidth))
3777 break;
3778
3779 uint64_t Amt = BitWidth - CI->getZExtValue();
3780 if (Amt == BitWidth)
3781 return getSCEV(L->getOperand(0)); // shift by zero --> noop
3782 return
3783 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
3784 IntegerType::get(getContext(),
3785 Amt)),
3786 U->getType());
3787 }
3788 break;
3789
3790 case Instruction::Trunc:
3791 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
3792
3793 case Instruction::ZExt:
3794 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3795
3796 case Instruction::SExt:
3797 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
3798
3799 case Instruction::BitCast:
3800 // BitCasts are no-op casts so we just eliminate the cast.
3801 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
3802 return getSCEV(U->getOperand(0));
3803 break;
3804
3805 // It's tempting to handle inttoptr and ptrtoint as no-ops, however this can
3806 // lead to pointer expressions which cannot safely be expanded to GEPs,
3807 // because ScalarEvolution doesn't respect the GEP aliasing rules when
3808 // simplifying integer expressions.
3809
3810 case Instruction::GetElementPtr:
3811 return createNodeForGEP(cast<GEPOperator>(U));
3812
3813 case Instruction::PHI:
3814 return createNodeForPHI(cast<PHINode>(U));
3815
3816 case Instruction::Select:
3817 // This could be a smax or umax that was lowered earlier.
3818 // Try to recover it.
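// For example (illustrative), (%a >s %b ? %a : %b) becomes smax(%a, %b),
// and (%n != 0 ? %n : 1) becomes umax(%n, 1).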
3819 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
3820 Value *LHS = ICI->getOperand(0);
3821 Value *RHS = ICI->getOperand(1);
3822 switch (ICI->getPredicate()) {
3823 case ICmpInst::ICMP_SLT:
3824 case ICmpInst::ICMP_SLE:
3825 std::swap(LHS, RHS);
3826 // fall through
3827 case ICmpInst::ICMP_SGT:
3828 case ICmpInst::ICMP_SGE:
3829 // a >s b ? a+x : b+x -> smax(a, b)+x
3830 // a >s b ? b+x : a+x -> smin(a, b)+x
3831 if (LHS->getType() == U->getType()) {
3832 const SCEV *LS = getSCEV(LHS);
3833 const SCEV *RS = getSCEV(RHS);
3834 const SCEV *LA = getSCEV(U->getOperand(1));
3835 const SCEV *RA = getSCEV(U->getOperand(2));
3836 const SCEV *LDiff = getMinusSCEV(LA, LS);
3837 const SCEV *RDiff = getMinusSCEV(RA, RS);
3838 if (LDiff == RDiff)
3839 return getAddExpr(getSMaxExpr(LS, RS), LDiff);
3840 LDiff = getMinusSCEV(LA, RS);
3841 RDiff = getMinusSCEV(RA, LS);
3842 if (LDiff == RDiff)
3843 return getAddExpr(getSMinExpr(LS, RS), LDiff);
3844 }
3845 break;
3846 case ICmpInst::ICMP_ULT:
3847 case ICmpInst::ICMP_ULE:
3848 std::swap(LHS, RHS);
3849 // fall through
3850 case ICmpInst::ICMP_UGT:
3851 case ICmpInst::ICMP_UGE:
3852 // a >u b ? a+x : b+x -> umax(a, b)+x
3853 // a >u b ? b+x : a+x -> umin(a, b)+x
3854 if (LHS->getType() == U->getType()) {
3855 const SCEV *LS = getSCEV(LHS);
3856 const SCEV *RS = getSCEV(RHS);
3857 const SCEV *LA = getSCEV(U->getOperand(1));
3858 const SCEV *RA = getSCEV(U->getOperand(2));
3859 const SCEV *LDiff = getMinusSCEV(LA, LS);
3860 const SCEV *RDiff = getMinusSCEV(RA, RS);
3861 if (LDiff == RDiff)
3862 return getAddExpr(getUMaxExpr(LS, RS), LDiff);
3863 LDiff = getMinusSCEV(LA, RS);
3864 RDiff = getMinusSCEV(RA, LS);
3865 if (LDiff == RDiff)
3866 return getAddExpr(getUMinExpr(LS, RS), LDiff);
3867 }
3868 break;
3869 case ICmpInst::ICMP_NE:
3870 // n != 0 ? n+x : 1+x -> umax(n, 1)+x
3871 if (LHS->getType() == U->getType() &&
3872 isa<ConstantInt>(RHS) &&
3873 cast<ConstantInt>(RHS)->isZero()) {
3874 const SCEV *One = getConstant(LHS->getType(), 1);
3875 const SCEV *LS = getSCEV(LHS);
3876 const SCEV *LA = getSCEV(U->getOperand(1));
3877 const SCEV *RA = getSCEV(U->getOperand(2));
3878 const SCEV *LDiff = getMinusSCEV(LA, LS);
3879 const SCEV *RDiff = getMinusSCEV(RA, One);
3880 if (LDiff == RDiff)
3881 return getAddExpr(getUMaxExpr(One, LS), LDiff);
3882 }
3883 break;
3884 case ICmpInst::ICMP_EQ:
3885 // n == 0 ? 1+x : n+x -> umax(n, 1)+x
3886 if (LHS->getType() == U->getType() &&
3887 isa<ConstantInt>(RHS) &&
3888 cast<ConstantInt>(RHS)->isZero()) {
3889 const SCEV *One = getConstant(LHS->getType(), 1);
3890 const SCEV *LS = getSCEV(LHS);
3891 const SCEV *LA = getSCEV(U->getOperand(1));
3892 const SCEV *RA = getSCEV(U->getOperand(2));
3893 const SCEV *LDiff = getMinusSCEV(LA, One);
3894 const SCEV *RDiff = getMinusSCEV(RA, LS);
3895 if (LDiff == RDiff)
3896 return getAddExpr(getUMaxExpr(One, LS), LDiff);
3897 }
3898 break;
3899 default:
3900 break;
3901 }
3902 }
3903
3904 default: // We cannot analyze this expression.
3905 break;
3906 }
3907
3908 return getUnknown(V);
3909 }
3910
3911
3912
3913 //===----------------------------------------------------------------------===//
3914 // Iteration Count Computation Code
3915 //
3916
3917 /// getSmallConstantTripCount - Returns the maximum trip count of this loop as a
3918 /// normal unsigned value, if possible. Returns 0 if the trip count is unknown
3919 /// or not constant. Will also return 0 if the maximum trip count is very large
3920 /// (>= 2^32).
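/// For example (illustrative), if the exit count via ExitBlock is the
/// constant 99, this returns a trip count of 100.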
3921 unsigned ScalarEvolution::getSmallConstantTripCount(Loop *L,
3922 BasicBlock *ExitBlock) {
3923 const SCEVConstant *ExitCount =
3924 dyn_cast<SCEVConstant>(getExitCount(L, ExitBlock));
3925 if (!ExitCount)
3926 return 0;
3927
3928 ConstantInt *ExitConst = ExitCount->getValue();
3929
3930 // Guard against huge trip counts.
3931 if (ExitConst->getValue().getActiveBits() > 32)
3932 return 0;
3933
3934 // In case of integer overflow, this returns 0, which is correct.
3935 return ((unsigned)ExitConst->getZExtValue()) + 1;
3936 }
3937
3938 /// getSmallConstantTripMultiple - Returns the largest constant divisor of the
3939 /// trip count of this loop as a normal unsigned value, if possible. This
3940 /// means that the actual trip count is always a multiple of the returned
3941 /// value (don't forget the trip count could very well be zero as well!).
3942 ///
3943 /// Returns 1 if the trip count is unknown or not guaranteed to be a
3944 /// multiple of a constant (which is also the case if the trip count is simply
3945 /// constant; use getSmallConstantTripCount for that case). It will also return
3946 /// 1 if the trip count is very large (>= 2^32).
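/// For example (illustrative), if the computed trip count is (4 * %n), this
/// returns 4.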
3947 unsigned ScalarEvolution::getSmallConstantTripMultiple(Loop *L,
3948 BasicBlock *ExitBlock) {
3949 const SCEV *ExitCount = getExitCount(L, ExitBlock);
3950 if (ExitCount == getCouldNotCompute())
3951 return 1;
3952
3953 // Get the trip count from the BE count by adding 1.
3954 const SCEV *TCMul = getAddExpr(ExitCount,
3955 getConstant(ExitCount->getType(), 1));
3956 // FIXME: SCEV distributes multiplication as V1*C1 + V2*C1. We could attempt
3957 // to factor simple cases.
3958 if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(TCMul))
3959 TCMul = Mul->getOperand(0);
3960
3961 const SCEVConstant *MulC = dyn_cast<SCEVConstant>(TCMul);
3962 if (!MulC)
3963 return 1;
3964
3965 ConstantInt *Result = MulC->getValue();
3966
3967 // Guard against huge trip counts.
3968 if (!Result || Result->getValue().getActiveBits() > 32)
3969 return 1;
3970
3971 return (unsigned)Result->getZExtValue();
3972 }
3973
3974 /// getExitCount - Get the expression for the number of loop iterations for
3975 /// which this loop is guaranteed not to exit via ExitingBlock. Otherwise
3976 /// return SCEVCouldNotCompute.
3977 const SCEV *ScalarEvolution::getExitCount(Loop *L, BasicBlock *ExitingBlock) {
3978 return getBackedgeTakenInfo(L).getExact(ExitingBlock, this);
3979 }
3980
3981 /// getBackedgeTakenCount - If the specified loop has a predictable
3982 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
3983 /// object. The backedge-taken count is the number of times the loop header
3984 /// will be branched to from within the loop. This is one less than the
3985 /// trip count of the loop, since it doesn't count the first iteration,
3986 /// when the header is branched to from outside the loop.
3987 ///
3988 /// Note that it is not valid to call this method on a loop without a
3989 /// loop-invariant backedge-taken count (see
3990 /// hasLoopInvariantBackedgeTakenCount).
3991 ///
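/// For example (illustrative), a loop whose body executes 100 times has a
/// backedge-taken count of 99 and a trip count of 100.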
3992 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
3993 return getBackedgeTakenInfo(L).getExact(this);
3994 }
3995
3996 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
3997 /// return the least SCEV value that is known never to be less than the
3998 /// actual backedge taken count.
3999 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
4000 return getBackedgeTakenInfo(L).getMax(this);
4001 }
4002
4003 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
4004 /// onto the given Worklist.
4005 static void
4006 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
4007 BasicBlock *Header = L->getHeader();
4008
4009 // Push all Loop-header PHIs onto the Worklist stack.
4010 for (BasicBlock::iterator I = Header->begin();
4011 PHINode *PN = dyn_cast<PHINode>(I); ++I)
4012 Worklist.push_back(PN);
4013 }
4014
4015 const ScalarEvolution::BackedgeTakenInfo &
4016 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
4017 // Initially insert an invalid entry for this loop. If the insertion
4018 // succeeds, proceed to actually compute a backedge-taken count and
4019 // update the value. The temporary CouldNotCompute value tells SCEV
4020 // code elsewhere that it shouldn't attempt to request a new
4021 // backedge-taken count, which could result in infinite recursion.
4022 std::pair<DenseMap<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
4023 BackedgeTakenCounts.insert(std::make_pair(L, BackedgeTakenInfo()));
4024 if (!Pair.second)
4025 return Pair.first->second;
4026
4027 // ComputeBackedgeTakenCount may allocate memory for its result. Inserting it
4028 // into the BackedgeTakenCounts map transfers ownership. Otherwise, the result
4029 // must be cleared in this scope.
4030 BackedgeTakenInfo Result = ComputeBackedgeTakenCount(L);
4031
4032 if (Result.getExact(this) != getCouldNotCompute()) {
4033 assert(isLoopInvariant(Result.getExact(this), L) &&
4034 isLoopInvariant(Result.getMax(this), L) &&
4035 "Computed backedge-taken count isn't loop invariant for loop!");
4036 ++NumTripCountsComputed;
4037 }
4038 else if (Result.getMax(this) == getCouldNotCompute() &&
4039 isa<PHINode>(L->getHeader()->begin())) {
4040 // Only count loops that have phi nodes as not being computable.
4041 ++NumTripCountsNotComputed;
4042 }
4043
4044 // Now that we know more about the trip count for this loop, forget any
4045 // existing SCEV values for PHI nodes in this loop since they are only
4046 // conservative estimates made without the benefit of trip count
4047 // information. This is similar to the code in forgetLoop, except that
4048 // it handles SCEVUnknown PHI nodes specially.
4049 if (Result.hasAnyInfo()) {
4050 SmallVector<Instruction *, 16> Worklist;
4051 PushLoopPHIs(L, Worklist);
4052
4053 SmallPtrSet<Instruction *, 8> Visited;
4054 while (!Worklist.empty()) {
4055 Instruction *I = Worklist.pop_back_val();
4056 if (!Visited.insert(I)) continue;
4057
4058 ValueExprMapType::iterator It =
4059 ValueExprMap.find(static_cast<Value *>(I));
4060 if (It != ValueExprMap.end()) {
4061 const SCEV *Old = It->second;
4062
4063 // SCEVUnknown for a PHI either means that it has an unrecognized
4064 // structure, or it's a PHI that's in the process of being computed
4065 // by createNodeForPHI. In the former case, additional loop trip
4066 // count information isn't going to change anything. In the latter
4067 // case, createNodeForPHI will perform the necessary updates on its
4068 // own when it gets to that point.
4069 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
4070 forgetMemoizedResults(Old);
4071 ValueExprMap.erase(It);
4072 }
4073 if (PHINode *PN = dyn_cast<PHINode>(I))
4074 ConstantEvolutionLoopExitValue.erase(PN);
4075 }
4076
4077 PushDefUseChildren(I, Worklist);
4078 }
4079 }
4080
4081 // Re-lookup the insert position, since the call to
4082 // ComputeBackedgeTakenCount above could result in a
4083 // recursive call to getBackedgeTakenInfo (on a different
4084 // loop), which would invalidate the iterator computed
4085 // earlier.
4086 return BackedgeTakenCounts.find(L)->second = Result;
4087 }
4088
4089 /// forgetLoop - This method should be called by the client when it has
4090 /// changed a loop in a way that may affect ScalarEvolution's ability to
4091 /// compute a trip count, or if the loop is deleted.
4092 void ScalarEvolution::forgetLoop(const Loop *L) {
4093 // Drop any stored trip count value.
4094 DenseMap<const Loop*, BackedgeTakenInfo>::iterator BTCPos =
4095 BackedgeTakenCounts.find(L);
4096 if (BTCPos != BackedgeTakenCounts.end()) {
4097 BTCPos->second.clear();
4098 BackedgeTakenCounts.erase(BTCPos);
4099 }
4100
4101 // Drop information about expressions based on loop-header PHIs.
4102 SmallVector<Instruction *, 16> Worklist;
4103 PushLoopPHIs(L, Worklist);
4104
4105 SmallPtrSet<Instruction *, 8> Visited;
4106 while (!Worklist.empty()) {
4107 Instruction *I = Worklist.pop_back_val();
4108 if (!Visited.insert(I)) continue;
4109
4110 ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
4111 if (It != ValueExprMap.end()) {
4112 forgetMemoizedResults(It->second);
4113 ValueExprMap.erase(It);
4114 if (PHINode *PN = dyn_cast<PHINode>(I))
4115 ConstantEvolutionLoopExitValue.erase(PN);
4116 }
4117
4118 PushDefUseChildren(I, Worklist);
4119 }
4120
4121 // Forget all contained loops too, to avoid dangling entries in the
4122 // ValuesAtScopes map.
4123 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
4124 forgetLoop(*I);
4125 }
4126
4127 /// forgetValue - This method should be called by the client when it has
4128 /// changed a value in a way that may affect its value, or which may
4129 /// disconnect it from a def-use chain linking it to a loop.
4130 void ScalarEvolution::forgetValue(Value *V) {
4131 Instruction *I = dyn_cast<Instruction>(V);
4132 if (!I) return;
4133
4134 // Drop information about expressions based on loop-header PHIs.
4135 SmallVector<Instruction *, 16> Worklist;
4136 Worklist.push_back(I);
4137
4138 SmallPtrSet<Instruction *, 8> Visited;
4139 while (!Worklist.empty()) {
4140 I = Worklist.pop_back_val();
4141 if (!Visited.insert(I)) continue;
4142
4143 ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
4144 if (It != ValueExprMap.end()) {
4145 forgetMemoizedResults(It->second);
4146 ValueExprMap.erase(It);
4147 if (PHINode *PN = dyn_cast<PHINode>(I))
4148 ConstantEvolutionLoopExitValue.erase(PN);
4149 }
4150
4151 PushDefUseChildren(I, Worklist);
4152 }
4153 }
4154
4155 /// getExact - Get the exact loop backedge taken count considering all loop
4156 /// exits. If all exits are computable, this is the minimum computed count.
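///
/// For illustration only (hypothetical counts, not from a real test case):
/// if a loop has two computable exiting blocks whose not-taken counts are
/// 7 and 10, the loop leaves through whichever exit triggers first, so the
/// exact backedge-taken count reported here is umin(7, 10) == 7.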
4157 const SCEV *
4158 ScalarEvolution::BackedgeTakenInfo::getExact(ScalarEvolution *SE) const {
4159 // If any exits were not computable, the loop is not computable.
4160 if (!ExitNotTaken.isCompleteList()) return SE->getCouldNotCompute();
4161
4162 // We need at least one computable exit.
4163 if (!ExitNotTaken.ExitingBlock) return SE->getCouldNotCompute();
4164 assert(ExitNotTaken.ExactNotTaken && "uninitialized not-taken info");
4165
4166 const SCEV *BECount = 0;
4167 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4168 ENT != 0; ENT = ENT->getNextExit()) {
4169
4170 assert(ENT->ExactNotTaken != SE->getCouldNotCompute() && "bad exit SCEV");
4171
4172 if (!BECount)
4173 BECount = ENT->ExactNotTaken;
4174 else
4175 BECount = SE->getUMinFromMismatchedTypes(BECount, ENT->ExactNotTaken);
4176 }
4177 assert(BECount && "Invalid not taken count for loop exit");
4178 return BECount;
4179 }
4180
4181 /// getExact - Get the exact not taken count for this loop exit.
4182 const SCEV *
4183 ScalarEvolution::BackedgeTakenInfo::getExact(BasicBlock *ExitingBlock,
4184 ScalarEvolution *SE) const {
4185 for (const ExitNotTakenInfo *ENT = &ExitNotTaken;
4186 ENT != 0; ENT = ENT->getNextExit()) {
4187
4188 if (ENT->ExitingBlock == ExitingBlock)
4189 return ENT->ExactNotTaken;
4190 }
4191 return SE->getCouldNotCompute();
4192 }
4193
4194 /// getMax - Get the max backedge taken count for the loop.
4195 const SCEV *
4196 ScalarEvolution::BackedgeTakenInfo::getMax(ScalarEvolution *SE) const {
4197 return Max ? Max : SE->getCouldNotCompute();
4198 }
4199
4200 /// Allocate memory for BackedgeTakenInfo and copy the not-taken count of each
4201 /// computable exit into a persistent ExitNotTakenInfo array.
4202 ScalarEvolution::BackedgeTakenInfo::BackedgeTakenInfo(
4203 SmallVectorImpl< std::pair<BasicBlock *, const SCEV *> > &ExitCounts,
4204 bool Complete, const SCEV *MaxCount) : Max(MaxCount) {
4205
4206 if (!Complete)
4207 ExitNotTaken.setIncomplete();
4208
4209 unsigned NumExits = ExitCounts.size();
4210 if (NumExits == 0) return;
4211
4212 ExitNotTaken.ExitingBlock = ExitCounts[0].first;
4213 ExitNotTaken.ExactNotTaken = ExitCounts[0].second;
4214 if (NumExits == 1) return;
4215
4216 // Handle the rare case of multiple computable exits.
4217 ExitNotTakenInfo *ENT = new ExitNotTakenInfo[NumExits-1];
4218
4219 ExitNotTakenInfo *PrevENT = &ExitNotTaken;
4220 for (unsigned i = 1; i < NumExits; ++i, PrevENT = ENT, ++ENT) {
4221 PrevENT->setNextExit(ENT);
4222 ENT->ExitingBlock = ExitCounts[i].first;
4223 ENT->ExactNotTaken = ExitCounts[i].second;
4224 }
4225 }
4226
4227 /// clear - Invalidate this result and free the ExitNotTakenInfo array.
4228 void ScalarEvolution::BackedgeTakenInfo::clear() {
4229 ExitNotTaken.ExitingBlock = 0;
4230 ExitNotTaken.ExactNotTaken = 0;
4231 delete[] ExitNotTaken.getNextExit();
4232 }
4233
4234 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
4235 /// of the specified loop will execute.
4236 ScalarEvolution::BackedgeTakenInfo
4237 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
4238 SmallVector<BasicBlock *, 8> ExitingBlocks;
4239 L->getExitingBlocks(ExitingBlocks);
4240
4241 // Examine all exits and pick the most conservative values.
4242 const SCEV *MaxBECount = getCouldNotCompute();
4243 bool CouldComputeBECount = true;
4244 SmallVector<std::pair<BasicBlock *, const SCEV *>, 4> ExitCounts;
4245 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
4246 ExitLimit EL = ComputeExitLimit(L, ExitingBlocks[i]);
4247 if (EL.Exact == getCouldNotCompute())
4248 // We couldn't compute an exact value for this exit, so
4249 // we won't be able to compute an exact value for the loop.
4250 CouldComputeBECount = false;
4251 else
4252 ExitCounts.push_back(std::make_pair(ExitingBlocks[i], EL.Exact));
4253
4254 if (MaxBECount == getCouldNotCompute())
4255 MaxBECount = EL.Max;
4256 else if (EL.Max != getCouldNotCompute())
4257 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, EL.Max);
4258 }
4259
4260 return BackedgeTakenInfo(ExitCounts, CouldComputeBECount, MaxBECount);
4261 }
4262
4263 /// ComputeExitLimit - Compute the number of times the backedge of the specified
4264 /// loop will execute if it exits via the specified block.
4265 ScalarEvolution::ExitLimit
4266 ScalarEvolution::ComputeExitLimit(const Loop *L, BasicBlock *ExitingBlock) {
4267
4268 // Okay, we've chosen an exiting block. See what condition causes us to
4269 // exit at this block.
4270 //
4271 // FIXME: we should be able to handle switch instructions (with a single exit)
4272 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
4273 if (ExitBr == 0) return getCouldNotCompute();
4274 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
4275
4276 // At this point, we know we have a conditional branch that determines whether
4277 // the loop is exited. However, we don't know if the branch is executed each
4278 // time through the loop. If not, then the execution count of the branch will
4279 // not be equal to the trip count of the loop.
4280 //
4281 // Currently we check for this by checking to see if the Exit branch goes to
4282 // the loop header. If so, we know it will always execute the same number of
4283 // times as the loop. We also handle the case where the exit block *is* the
4284 // loop header. This is common for un-rotated loops.
4285 //
4286 // If both of those tests fail, walk up the unique predecessor chain to the
4287 // header, stopping if there is an edge that doesn't exit the loop. If the
4288 // header is reached, the execution count of the branch will be equal to the
4289 // trip count of the loop.
4290 //
4291 // More extensive analysis could be done to handle more cases here.
4292 //
4293 if (ExitBr->getSuccessor(0) != L->getHeader() &&
4294 ExitBr->getSuccessor(1) != L->getHeader() &&
4295 ExitBr->getParent() != L->getHeader()) {
4296 // The simple checks failed, try climbing the unique predecessor chain
4297 // up to the header.
4298 bool Ok = false;
4299 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
4300 BasicBlock *Pred = BB->getUniquePredecessor();
4301 if (!Pred)
4302 return getCouldNotCompute();
4303 TerminatorInst *PredTerm = Pred->getTerminator();
4304 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
4305 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
4306 if (PredSucc == BB)
4307 continue;
4308 // If the predecessor has a successor that isn't BB and isn't
4309 // outside the loop, assume the worst.
4310 if (L->contains(PredSucc))
4311 return getCouldNotCompute();
4312 }
4313 if (Pred == L->getHeader()) {
4314 Ok = true;
4315 break;
4316 }
4317 BB = Pred;
4318 }
4319 if (!Ok)
4320 return getCouldNotCompute();
4321 }
4322
4323 // Proceed to the next level to examine the exit condition expression.
4324 return ComputeExitLimitFromCond(L, ExitBr->getCondition(),
4325 ExitBr->getSuccessor(0),
4326 ExitBr->getSuccessor(1));
4327 }
4328
4329 /// ComputeExitLimitFromCond - Compute the number of times the
4330 /// backedge of the specified loop will execute if its exit condition
4331 /// were a conditional branch of ExitCond, TBB, and FBB.
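///
/// As a sketch of the 'and' case below (source-level, loop details assumed):
/// for a loop guarded by "while (a != n && b != m)", both sub-conditions must
/// stay true to take the backedge, so the exact count is the unsigned minimum
/// of the two sub-limits; if either sub-limit is CouldNotCompute, the exact
/// count is too, but a max bound may still be taken from the computable side.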
4332 ScalarEvolution::ExitLimit
4333 ScalarEvolution::ComputeExitLimitFromCond(const Loop *L,
4334 Value *ExitCond,
4335 BasicBlock *TBB,
4336 BasicBlock *FBB) {
4337 // Check if the controlling expression for this loop is an And or Or.
4338 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
4339 if (BO->getOpcode() == Instruction::And) {
4340 // Recurse on the operands of the and.
4341 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB);
4342 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB);
4343 const SCEV *BECount = getCouldNotCompute();
4344 const SCEV *MaxBECount = getCouldNotCompute();
4345 if (L->contains(TBB)) {
4346 // Both conditions must be true for the loop to continue executing.
4347 // Choose the less conservative count.
4348 if (EL0.Exact == getCouldNotCompute() ||
4349 EL1.Exact == getCouldNotCompute())
4350 BECount = getCouldNotCompute();
4351 else
4352 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4353 if (EL0.Max == getCouldNotCompute())
4354 MaxBECount = EL1.Max;
4355 else if (EL1.Max == getCouldNotCompute())
4356 MaxBECount = EL0.Max;
4357 else
4358 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
4359 } else {
4360 // Both conditions must be true at the same time for the loop to exit.
4361 // For now, be conservative.
4362 assert(L->contains(FBB) && "Loop block has no successor in loop!");
4363 if (EL0.Max == EL1.Max)
4364 MaxBECount = EL0.Max;
4365 if (EL0.Exact == EL1.Exact)
4366 BECount = EL0.Exact;
4367 }
4368
4369 return ExitLimit(BECount, MaxBECount);
4370 }
4371 if (BO->getOpcode() == Instruction::Or) {
4372 // Recurse on the operands of the or.
4373 ExitLimit EL0 = ComputeExitLimitFromCond(L, BO->getOperand(0), TBB, FBB);
4374 ExitLimit EL1 = ComputeExitLimitFromCond(L, BO->getOperand(1), TBB, FBB);
4375 const SCEV *BECount = getCouldNotCompute();
4376 const SCEV *MaxBECount = getCouldNotCompute();
4377 if (L->contains(FBB)) {
4378 // Both conditions must be false for the loop to continue executing.
4379 // Choose the less conservative count.
4380 if (EL0.Exact == getCouldNotCompute() ||
4381 EL1.Exact == getCouldNotCompute())
4382 BECount = getCouldNotCompute();
4383 else
4384 BECount = getUMinFromMismatchedTypes(EL0.Exact, EL1.Exact);
4385 if (EL0.Max == getCouldNotCompute())
4386 MaxBECount = EL1.Max;
4387 else if (EL1.Max == getCouldNotCompute())
4388 MaxBECount = EL0.Max;
4389 else
4390 MaxBECount = getUMinFromMismatchedTypes(EL0.Max, EL1.Max);
4391 } else {
4392 // Both conditions must be false at the same time for the loop to exit.
4393 // For now, be conservative.
4394 assert(L->contains(TBB) && "Loop block has no successor in loop!");
4395 if (EL0.Max == EL1.Max)
4396 MaxBECount = EL0.Max;
4397 if (EL0.Exact == EL1.Exact)
4398 BECount = EL0.Exact;
4399 }
4400
4401 return ExitLimit(BECount, MaxBECount);
4402 }
4403 }
4404
4405 // With an icmp, it may be feasible to compute an exact backedge-taken count.
4406 // Proceed to the next level to examine the icmp.
4407 if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
4408 return ComputeExitLimitFromICmp(L, ExitCondICmp, TBB, FBB);
4409
4410 // Check for a constant condition. These are normally stripped out by
4411 // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
4412 // preserve the CFG and is temporarily leaving constant conditions
4413 // in place.
4414 if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
4415 if (L->contains(FBB) == !CI->getZExtValue())
4416 // The backedge is always taken.
4417 return getCouldNotCompute();
4418 else
4419 // The backedge is never taken.
4420 return getConstant(CI->getType(), 0);
4421 }
4422
4423 // If it's not an integer or pointer comparison then compute it the hard way.
4424 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
4425 }
4426
4427 /// ComputeExitLimitFromICmp - Compute the number of times the
4428 /// backedge of the specified loop will execute if its exit condition
4429 /// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
4430 ScalarEvolution::ExitLimit
4431 ScalarEvolution::ComputeExitLimitFromICmp(const Loop *L,
4432 ICmpInst *ExitCond,
4433 BasicBlock *TBB,
4434 BasicBlock *FBB) {
4435
4436 // If the condition was exit on true, convert the condition to exit on false
4437 ICmpInst::Predicate Cond;
4438 if (!L->contains(FBB))
4439 Cond = ExitCond->getPredicate();
4440 else
4441 Cond = ExitCond->getInversePredicate();
4442
4443 // Handle common loops like: for (X = "string"; *X; ++X)
4444 if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
4445 if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
4446 ExitLimit ItCnt =
4447 ComputeLoadConstantCompareExitLimit(LI, RHS, L, Cond);
4448 if (ItCnt.hasAnyInfo())
4449 return ItCnt;
4450 }
4451
4452 const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
4453 const SCEV *RHS = getSCEV(ExitCond->getOperand(1));
4454
4455 // Try to evaluate any dependencies out of the loop.
4456 LHS = getSCEVAtScope(LHS, L);
4457 RHS = getSCEVAtScope(RHS, L);
4458
4459 // At this point, we would like to compute how many iterations of the
4460 // loop the predicate will return true for these inputs.
4461 if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
4462 // If there is a loop-invariant, force it into the RHS.
4463 std::swap(LHS, RHS);
4464 Cond = ICmpInst::getSwappedPredicate(Cond);
4465 }
4466
4467 // Simplify the operands before analyzing them.
4468 (void)SimplifyICmpOperands(Cond, LHS, RHS);
4469
4470 // If we have a comparison of a chrec against a constant, try to use value
4471 // ranges to answer this query.
4472 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
4473 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
4474 if (AddRec->getLoop() == L) {
4475 // Form the constant range.
4476 ConstantRange CompRange(
4477 ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));
4478
4479 const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
4480 if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
4481 }
4482
4483 switch (Cond) {
4484 case ICmpInst::ICMP_NE: { // while (X != Y)
4485 // Convert to: while (X-Y != 0)
4486 ExitLimit EL = HowFarToZero(getMinusSCEV(LHS, RHS), L);
4487 if (EL.hasAnyInfo()) return EL;
4488 break;
4489 }
4490 case ICmpInst::ICMP_EQ: { // while (X == Y)
4491 // Convert to: while (X-Y == 0)
4492 ExitLimit EL = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
4493 if (EL.hasAnyInfo()) return EL;
4494 break;
4495 }
4496 case ICmpInst::ICMP_SLT: {
4497 ExitLimit EL = HowManyLessThans(LHS, RHS, L, true);
4498 if (EL.hasAnyInfo()) return EL;
4499 break;
4500 }
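  // Note on the SGT/UGT cases below (explanatory sketch only): they reuse
  // HowManyLessThans by flipping both sides with bitwise NOT. Since
  // ~v == -1 - v, NOT reverses both the signed and the unsigned order, so
  // "X > Y" holds exactly when "~X < ~Y" does; this rewrites a greater-than
  // exit test as a less-than test while keeping the (typically addrec)
  // left-hand side on the left.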
4501 case ICmpInst::ICMP_SGT: {
4502 ExitLimit EL = HowManyLessThans(getNotSCEV(LHS),
4503 getNotSCEV(RHS), L, true);
4504 if (EL.hasAnyInfo()) return EL;
4505 break;
4506 }
4507 case ICmpInst::ICMP_ULT: {
4508 ExitLimit EL = HowManyLessThans(LHS, RHS, L, false);
4509 if (EL.hasAnyInfo()) return EL;
4510 break;
4511 }
4512 case ICmpInst::ICMP_UGT: {
4513 ExitLimit EL = HowManyLessThans(getNotSCEV(LHS),
4514 getNotSCEV(RHS), L, false);
4515 if (EL.hasAnyInfo()) return EL;
4516 break;
4517 }
4518 default:
4519 #if 0
4520 dbgs() << "ComputeBackedgeTakenCount ";
4521 if (ExitCond->getOperand(0)->getType()->isUnsigned())
4522 dbgs() << "[unsigned] ";
4523 dbgs() << *LHS << " "
4524 << Instruction::getOpcodeName(Instruction::ICmp)
4525 << " " << *RHS << "\n";
4526 #endif
4527 break;
4528 }
4529 return ComputeExitCountExhaustively(L, ExitCond, !L->contains(TBB));
4530 }
4531
4532 static ConstantInt *
4533 EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
4534 ScalarEvolution &SE) {
4535 const SCEV *InVal = SE.getConstant(C);
4536 const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
4537 assert(isa<SCEVConstant>(Val) &&
4538 "Evaluation of SCEV at constant didn't fold correctly?");
4539 return cast<SCEVConstant>(Val)->getValue();
4540 }
4541
4542 /// GetAddressedElementFromGlobal - Given a global variable with an initializer
4543 /// and a GEP expression (missing the pointer index) indexing into it, return
4544 /// the addressed element of the initializer or null if the index expression is
4545 /// invalid.
4546 static Constant *
4547 GetAddressedElementFromGlobal(GlobalVariable *GV,
4548 const std::vector<ConstantInt*> &Indices) {
4549 Constant *Init = GV->getInitializer();
4550 for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
4551 uint64_t Idx = Indices[i]->getZExtValue();
4552 if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
4553 assert(Idx < CS->getNumOperands() && "Bad struct index!");
4554 Init = cast<Constant>(CS->getOperand(Idx));
4555 } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
4556 if (Idx >= CA->getNumOperands()) return 0; // Bogus program
4557 Init = cast<Constant>(CA->getOperand(Idx));
4558 } else if (isa<ConstantAggregateZero>(Init)) {
4559 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
4560 assert(Idx < STy->getNumElements() && "Bad struct index!");
4561 Init = Constant::getNullValue(STy->getElementType(Idx));
4562 } else if (ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
4563 if (Idx >= ATy->getNumElements()) return 0; // Bogus program
4564 Init = Constant::getNullValue(ATy->getElementType());
4565 } else {
4566 llvm_unreachable("Unknown constant aggregate type!");
4567 }
4568 return 0;
4569 } else {
4570 return 0; // Unknown initializer type
4571 }
4572 }
4573 return Init;
4574 }
4575
4576 /// ComputeLoadConstantCompareExitLimit - Given an exit condition of
4577 /// 'icmp op load X, cst', try to see if we can compute the backedge
4578 /// execution count.
4579 ScalarEvolution::ExitLimit
4580 ScalarEvolution::ComputeLoadConstantCompareExitLimit(
4581 LoadInst *LI,
4582 Constant *RHS,
4583 const Loop *L,
4584 ICmpInst::Predicate predicate) {
4585
4586 if (LI->isVolatile()) return getCouldNotCompute();
4587
4588 // Check to see if the loaded pointer is a getelementptr of a global.
4589 // TODO: Use SCEV instead of manually grubbing with GEPs.
4590 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
4591 if (!GEP) return getCouldNotCompute();
4592
4593 // Make sure that it is really a constant global we are gepping, with an
4594 // initializer, and make sure the first IDX is really 0.
4595 GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
4596 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
4597 GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
4598 !cast<Constant>(GEP->getOperand(1))->isNullValue())
4599 return getCouldNotCompute();
4600
4601 // Okay, we allow one non-constant index into the GEP instruction.
4602 Value *VarIdx = 0;
4603 std::vector<ConstantInt*> Indexes;
4604 unsigned VarIdxNum = 0;
4605 for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
4606 if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
4607 Indexes.push_back(CI);
4608 } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
4609 if (VarIdx) return getCouldNotCompute(); // Multiple non-constant idx's.
4610 VarIdx = GEP->getOperand(i);
4611 VarIdxNum = i-2;
4612 Indexes.push_back(0);
4613 }
4614
4615 // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
4616 // Check to see if X is a loop variant variable value now.
4617 const SCEV *Idx = getSCEV(VarIdx);
4618 Idx = getSCEVAtScope(Idx, L);
4619
4620 // We can only recognize very limited forms of loop index expressions, in
4621 // particular, only affine AddRec's like {C1,+,C2}.
4622 const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
4623 if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
4624 !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
4625 !isa<SCEVConstant>(IdxExpr->getOperand(1)))
4626 return getCouldNotCompute();
4627
4628 unsigned MaxSteps = MaxBruteForceIterations;
4629 for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
4630 ConstantInt *ItCst = ConstantInt::get(
4631 cast<IntegerType>(IdxExpr->getType()), IterationNum);
4632 ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);
4633
4634 // Form the GEP offset.
4635 Indexes[VarIdxNum] = Val;
4636
4637 Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
4638 if (Result == 0) break; // Cannot compute!
4639
4640 // Evaluate the condition for this iteration.
4641 Result = ConstantExpr::getICmp(predicate, Result, RHS);
4642 if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
4643 if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
4644 #if 0
4645 dbgs() << "\n***\n*** Computed loop count " << *ItCst
4646 << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
4647 << "***\n";
4648 #endif
4649 ++NumArrayLenItCounts;
4650 return getConstant(ItCst); // Found terminating iteration!
4651 }
4652 }
4653 return getCouldNotCompute();
4654 }
4655
4656
4657 /// CanConstantFold - Return true if we can constant fold an instruction of the
4658 /// specified type, assuming that all operands were constants.
4659 static bool CanConstantFold(const Instruction *I) {
4660 if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
4661 isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
4662 return true;
4663
4664 if (const CallInst *CI = dyn_cast<CallInst>(I))
4665 if (const Function *F = CI->getCalledFunction())
4666 return canConstantFoldCallTo(F);
4667 return false;
4668 }
4669
4670 /// Determine whether this instruction can constant evolve within this loop
4671 /// assuming its operands can all constant evolve.
4672 static bool canConstantEvolve(Instruction *I, const Loop *L) {
4673 // An instruction outside of the loop can't be derived from a loop PHI.
4674 if (!L->contains(I)) return false;
4675
4676 if (isa<PHINode>(I)) {
4677 if (L->getHeader() == I->getParent())
4678 return true;
4679 else
4680 // We don't currently keep track of the control flow needed to evaluate
4681 // PHIs, so we cannot handle PHIs inside of loops.
4682 return false;
4683 }
4684
4685 // If we won't be able to constant fold this expression even if the operands
4686 // are constants, bail early.
4687 return CanConstantFold(I);
4688 }
4689
4690 /// getConstantEvolvingPHIOperands - Implement getConstantEvolvingPHI by
4691 /// recursing through each instruction operand until reaching a loop header phi.
4692 static PHINode *
4693 getConstantEvolvingPHIOperands(Instruction *UseInst, const Loop *L,
4694 DenseMap<Instruction *, PHINode *> &PHIMap) {
4695
4696 // Otherwise, we can evaluate this instruction if all of its operands are
4697 // constant or derived from a PHI node themselves.
4698 PHINode *PHI = 0;
4699 for (Instruction::op_iterator OpI = UseInst->op_begin(),
4700 OpE = UseInst->op_end(); OpI != OpE; ++OpI) {
4701
4702 if (isa<Constant>(*OpI)) continue;
4703
4704 Instruction *OpInst = dyn_cast<Instruction>(*OpI);
4705 if (!OpInst || !canConstantEvolve(OpInst, L)) return 0;
4706
4707 PHINode *P = dyn_cast<PHINode>(OpInst);
4708 if (!P)
4709 // If this operand is already visited, reuse the prior result.
4710 // We may have P != PHI if this is the deepest point at which the
4711 // inconsistent paths meet.
4712 P = PHIMap.lookup(OpInst);
4713 if (!P) {
4714 // Recurse and memoize the results, whether a phi is found or not.
4715 // This recursive call invalidates pointers into PHIMap.
4716 P = getConstantEvolvingPHIOperands(OpInst, L, PHIMap);
4717 PHIMap[OpInst] = P;
4718 }
4719 if (P == 0) return 0; // Not evolving from PHI
4720 if (PHI && PHI != P) return 0; // Evolving from multiple different PHIs.
4721 PHI = P;
4722 }
4723 // This is an expression evolving from a constant PHI!
4724 return PHI;
4725 }
4726
4727 /// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
4728 /// in the loop that V is derived from. We allow arbitrary operations along the
4729 /// way, but the operands of an operation must either be constants or a value
4730 /// derived from a constant PHI. If this expression does not fit with these
4731 /// constraints, return null.
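///
/// For example (source-level sketch; the names are hypothetical): in
///   for (i = 4; i != 0; i >>= 1) ...
/// the exit condition "i != 0" is built only from the header PHI for 'i'
/// and constants, so this returns that PHI. An expression derived from two
/// different header PHIs, or from a non-foldable call, returns null.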
4732 static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
4733 Instruction *I = dyn_cast<Instruction>(V);
4734 if (I == 0 || !canConstantEvolve(I, L)) return 0;
4735
4736 if (PHINode *PN = dyn_cast<PHINode>(I)) {
4737 return PN;
4738 }
4739
4740 // Record non-constant instructions contained by the loop.
4741 DenseMap<Instruction *, PHINode *> PHIMap;
4742 return getConstantEvolvingPHIOperands(I, L, PHIMap);
4743 }
4744
4745 /// EvaluateExpression - Given an expression that passes the
4746 /// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
4747 /// in the loop has the value PHIVal. If we can't fold this expression for some
4748 /// reason, return null.
4749 static Constant *EvaluateExpression(Value *V, const Loop *L,
4750 DenseMap<Instruction *, Constant *> &Vals,
4751 const TargetData *TD) {
4752 // Convenient constant check, but redundant for recursive calls.
4753 if (Constant *C = dyn_cast<Constant>(V)) return C;
4754
4755 Instruction *I = cast<Instruction>(V);
4756 if (Constant *C = Vals.lookup(I)) return C;
4757
4758 assert(!isa<PHINode>(I) && "loop header phis should be mapped to constant");
4759 assert(canConstantEvolve(I, L) && "cannot evaluate expression in this loop");
4760 (void)L;
4761
4762 std::vector<Constant*> Operands(I->getNumOperands());
4763
4764 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4765 Instruction *Operand = dyn_cast<Instruction>(I->getOperand(i));
4766 if (!Operand) {
4767 Operands[i] = dyn_cast<Constant>(I->getOperand(i));
4768 if (!Operands[i]) return 0;
4769 continue;
4770 }
4771 Constant *C = EvaluateExpression(Operand, L, Vals, TD);
4772 Vals[Operand] = C;
4773 if (!C) return 0;
4774 Operands[i] = C;
4775 }
4776
4777 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4778 return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
4779 Operands[1], TD);
4780 return ConstantFoldInstOperands(I->getOpcode(), I->getType(), Operands, TD);
4781 }
4782
4783 /// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
4784 /// in the header of its containing loop, we know the loop executes a
4785 /// constant number of times, and the PHI node is just a recurrence
4786 /// involving constants, fold it.
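///
/// Worked example (hypothetical values): a header PHI %x that starts at 1
/// and whose backedge value is %x * 3, queried with a backedge-taken count
/// of 5, is evaluated iteration by iteration: 1, 3, 9, 27, 81, 243, and the
/// returned constant is 243 (i.e. 3^5). Counts above MaxBruteForceIterations
/// are not evaluated and yield null.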
4787 Constant *
4788 ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
4789 const APInt &BEs,
4790 const Loop *L) {
4791 DenseMap<PHINode*, Constant*>::const_iterator I =
4792 ConstantEvolutionLoopExitValue.find(PN);
4793 if (I != ConstantEvolutionLoopExitValue.end())
4794 return I->second;
4795
4796 if (BEs.ugt(MaxBruteForceIterations))
4797 return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
4798
4799 Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
4800
4801 // FIXME: Nick's fix for PR11034 will seed constants for multiple header phis.
4802 DenseMap<Instruction *, Constant *> CurrentIterVals;
4803
4804 // Since the loop is canonicalized, the PHI node must have two entries. One
4805 // entry must be a constant (coming in from outside of the loop), and the
4806 // second must be derived from the same PHI.
4807 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4808 Constant *StartCST =
4809 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4810 if (StartCST == 0)
4811 return RetVal = 0; // Must be a constant.
4812 CurrentIterVals[PN] = StartCST;
4813
4814 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4815 if (getConstantEvolvingPHI(BEValue, L) != PN &&
4816 !isa<Constant>(BEValue))
4817 return RetVal = 0; // Not derived from same PHI.
4818
4819 // Execute the loop symbolically to determine the exit value.
4820 if (BEs.getActiveBits() >= 32)
4821 return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!
4822
4823 unsigned NumIterations = BEs.getZExtValue(); // must be in range
4824 unsigned IterationNum = 0;
4825 for (; ; ++IterationNum) {
4826 if (IterationNum == NumIterations)
4827 return RetVal = CurrentIterVals[PN]; // Got exit value!
4828
4829 // Compute the value of the PHI node for the next iteration.
4830 // EvaluateExpression adds non-phi values to the CurrentIterVals map.
4831 Constant *NextPHI = EvaluateExpression(BEValue, L, CurrentIterVals, TD);
4832 if (NextPHI == CurrentIterVals[PN])
4833 return RetVal = NextPHI; // Stopped evolving!
4834 if (NextPHI == 0)
4835 return 0; // Couldn't evaluate!
4836 DenseMap<Instruction *, Constant *> NextIterVals;
4837 NextIterVals[PN] = NextPHI;
4838 CurrentIterVals.swap(NextIterVals);
4839 }
4840 }
4841
4842 /// ComputeExitCountExhaustively - If the loop is known to execute a
4843 /// constant number of times (the condition evolves only from constants),
4844 /// try to evaluate a few iterations of the loop until the exit
4845 /// condition gets a value of ExitWhen (true or false). If we cannot
4846 /// evaluate the trip count of the loop, return getCouldNotCompute().
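///
/// Sketch (source-level, hypothetical, assuming the branch exits when the
/// test is false): in
///   for (i = 0; i * i != 25; ++i) ...
/// the condition evolves only from the header PHI for 'i' and constants, so
/// the loop is run symbolically; the test first evaluates to false when i
/// reaches 5, and the returned exit count is 5. At most
/// MaxBruteForceIterations iterations are attempted before giving up.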
4847 const SCEV *ScalarEvolution::ComputeExitCountExhaustively(const Loop *L,
4848 Value *Cond,
4849 bool ExitWhen) {
4850 PHINode *PN = getConstantEvolvingPHI(Cond, L);
4851 if (PN == 0) return getCouldNotCompute();
4852
4853 // If the loop is canonicalized, the PHI will have exactly two entries.
4854 // That's the only form we support here.
4855 if (PN->getNumIncomingValues() != 2) return getCouldNotCompute();
4856
4857 // One entry must be a constant (coming in from outside of the loop), and the
4858 // second must be derived from the same PHI.
4859 bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
4860 Constant *StartCST =
4861 dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
4862 if (StartCST == 0) return getCouldNotCompute(); // Must be a constant.
4863
4864 Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
4865 if (getConstantEvolvingPHI(BEValue, L) != PN &&
4866 !isa<Constant>(BEValue))
4867 return getCouldNotCompute(); // Not derived from same PHI.
4868
4869 // Okay, we found a PHI node that defines the trip count of this loop. Execute
4870 // the loop symbolically to determine when the condition gets a value of
4871 // "ExitWhen".
4872 unsigned IterationNum = 0;
4873 unsigned MaxIterations = MaxBruteForceIterations; // Limit analysis.
4874 for (Constant *PHIVal = StartCST;
4875 IterationNum != MaxIterations; ++IterationNum) {
4876 DenseMap<Instruction *, Constant *> PHIValMap;
4877 PHIValMap[PN] = PHIVal;
4878 ConstantInt *CondVal =
4879 dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, L, PHIValMap, TD));
4880
4881 // Couldn't symbolically evaluate.
4882 if (!CondVal) return getCouldNotCompute();
4883
4884 if (CondVal->getValue() == uint64_t(ExitWhen)) {
4885 ++NumBruteForceTripCountsComputed;
4886 return getConstant(Type::getInt32Ty(getContext()), IterationNum);
4887 }
4888
4889 // Compute the value of the PHI node for the next iteration.
4890 Constant *NextPHI = EvaluateExpression(BEValue, L, PHIValMap, TD);
4891 if (NextPHI == 0 || NextPHI == PHIVal)
4892 return getCouldNotCompute();// Couldn't evaluate or not making progress...
4893 PHIVal = NextPHI;
4894 }
4895
4896 // Too many iterations were needed to evaluate.
4897 return getCouldNotCompute();
4898 }
4899
4900 /// getSCEVAtScope - Return a SCEV expression for the specified value
4901 /// at the specified scope in the program. The L value specifies a loop
4902 /// nest to evaluate the expression in: null means the top level, and a
4903 /// specified loop means the expression is evaluated just inside that loop.
4904 ///
4905 /// This method can be used to compute the exit value for a variable defined
4906 /// in a loop by querying what the value will hold in the parent loop.
4907 ///
4908 /// In the case that a relevant loop exit value cannot be computed, the
4909 /// original value V is returned.
4910 const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
4911 // Check to see if we've folded this expression at this loop before.
4912 std::map<const Loop *, const SCEV *> &Values = ValuesAtScopes[V];
4913 std::pair<std::map<const Loop *, const SCEV *>::iterator, bool> Pair =
4914 Values.insert(std::make_pair(L, static_cast<const SCEV *>(0)));
4915 if (!Pair.second)
4916 return Pair.first->second ? Pair.first->second : V;
4917
4918 // Otherwise compute it.
4919 const SCEV *C = computeSCEVAtScope(V, L);
4920 ValuesAtScopes[V][L] = C;
4921 return C;
4922 }
4923
4924 const SCEV *ScalarEvolution::computeSCEVAtScope(const SCEV *V, const Loop *L) {
4925 if (isa<SCEVConstant>(V)) return V;
4926
4927 // If this instruction is evolved from a constant-evolving PHI, compute the
4928 // exit value from the loop without using SCEVs.
4929 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
4930 if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
4931 const Loop *LI = (*this->LI)[I->getParent()];
4932 if (LI && LI->getParentLoop() == L) // Looking for loop exit value.
4933 if (PHINode *PN = dyn_cast<PHINode>(I))
4934 if (PN->getParent() == LI->getHeader()) {
4935 // Okay, there is no closed form solution for the PHI node. Check
4936 // to see if the loop that contains it has a known backedge-taken
4937 // count. If so, we may be able to force computation of the exit
4938 // value.
4939 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
4940 if (const SCEVConstant *BTCC =
4941 dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
4942 // Okay, we know how many times the containing loop executes. If
4943 // this is a constant evolving PHI node, get the final value at
4944 // the specified iteration number.
4945 Constant *RV = getConstantEvolutionLoopExitValue(PN,
4946 BTCC->getValue()->getValue(),
4947 LI);
4948 if (RV) return getSCEV(RV);
4949 }
4950 }
4951
4952 // Okay, this is an expression that we cannot symbolically evaluate
4953 // into a SCEV. Check to see if it's possible to symbolically evaluate
4954 // the arguments into constants, and if so, try to constant propagate the
4955 // result. This is particularly useful for computing loop exit values.
4956 if (CanConstantFold(I)) {
4957 SmallVector<Constant *, 4> Operands;
4958 bool MadeImprovement = false;
4959 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
4960 Value *Op = I->getOperand(i);
4961 if (Constant *C = dyn_cast<Constant>(Op)) {
4962 Operands.push_back(C);
4963 continue;
4964 }
4965
4966 // If any of the operands is non-constant and if they are
4967 // non-integer and non-pointer, don't even try to analyze them
4968 // with scev techniques.
4969 if (!isSCEVable(Op->getType()))
4970 return V;
4971
4972 const SCEV *OrigV = getSCEV(Op);
4973 const SCEV *OpV = getSCEVAtScope(OrigV, L);
4974 MadeImprovement |= OrigV != OpV;
4975
4976 Constant *C = 0;
4977 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV))
4978 C = SC->getValue();
4979 if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV))
4980 C = dyn_cast<Constant>(SU->getValue());
4981 if (!C) return V;
4982 if (C->getType() != Op->getType())
4983 C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
4984 Op->getType(),
4985 false),
4986 C, Op->getType());
4987 Operands.push_back(C);
4988 }
4989
4990 // Check to see if getSCEVAtScope actually made an improvement.
4991 if (MadeImprovement) {
4992 Constant *C = 0;
4993 if (const CmpInst *CI = dyn_cast<CmpInst>(I))
4994 C = ConstantFoldCompareInstOperands(CI->getPredicate(),
4995 Operands[0], Operands[1], TD);
4996 else
4997 C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
4998 Operands, TD);
4999 if (!C) return V;
5000 return getSCEV(C);
5001 }
5002 }
5003 }
5004
5005 // This is some other type of SCEVUnknown, just return it.
5006 return V;
5007 }
5008
5009 if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
5010 // Avoid performing the look-up in the common case where the specified
5011 // expression has no loop-variant portions.
5012 for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
5013 const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5014 if (OpAtScope != Comm->getOperand(i)) {
5015 // Okay, at least one of these operands is loop variant but might be
5016 // foldable. Build a new instance of the folded commutative expression.
5017 SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
5018 Comm->op_begin()+i);
5019 NewOps.push_back(OpAtScope);
5020
5021 for (++i; i != e; ++i) {
5022 OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
5023 NewOps.push_back(OpAtScope);
5024 }
5025 if (isa<SCEVAddExpr>(Comm))
5026 return getAddExpr(NewOps);
5027 if (isa<SCEVMulExpr>(Comm))
5028 return getMulExpr(NewOps);
5029 if (isa<SCEVSMaxExpr>(Comm))
5030 return getSMaxExpr(NewOps);
5031 if (isa<SCEVUMaxExpr>(Comm))
5032 return getUMaxExpr(NewOps);
5033 llvm_unreachable("Unknown commutative SCEV type!");
5034 }
5035 }
5036 // If we got here, all operands are loop invariant.
5037 return Comm;
5038 }
5039
5040 if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
5041 const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
5042 const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
5043 if (LHS == Div->getLHS() && RHS == Div->getRHS())
5044 return Div; // must be loop invariant
5045 return getUDivExpr(LHS, RHS);
5046 }
5047
5048 // If this is a loop recurrence for a loop that does not contain L, then we
5049 // are dealing with the final value computed by the loop.
5050 if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
5051 // First, attempt to evaluate each operand.
5052 // Avoid performing the look-up in the common case where the specified
5053 // expression has no loop-variant portions.
5054 for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
5055 const SCEV *OpAtScope = getSCEVAtScope(AddRec->getOperand(i), L);
5056 if (OpAtScope == AddRec->getOperand(i))
5057 continue;
5058
5059 // Okay, at least one of these operands is loop variant but might be
5060 // foldable. Build a new instance of the folded commutative expression.
5061 SmallVector<const SCEV *, 8> NewOps(AddRec->op_begin(),
5062 AddRec->op_begin()+i);
5063 NewOps.push_back(OpAtScope);
5064 for (++i; i != e; ++i)
5065 NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
5066
5067 const SCEV *FoldedRec =
5068 getAddRecExpr(NewOps, AddRec->getLoop(),
5069 AddRec->getNoWrapFlags(SCEV::FlagNW));
5070 AddRec = dyn_cast<SCEVAddRecExpr>(FoldedRec);
5071 // The addrec may be folded to a nonrecurrence, for example, if the
5072 // induction variable is multiplied by zero after constant folding. Go
5073 // ahead and return the folded value.
5074 if (!AddRec)
5075 return FoldedRec;
5076 break;
5077 }
5078
5079 // If the scope is outside the addrec's loop, evaluate it by using the
5080 // loop exit value of the addrec.
5081 if (!AddRec->getLoop()->contains(L)) {
5082 // To evaluate this recurrence, we need to know how many times the AddRec
5083 // loop iterates. Compute this now.
5084 const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
5085 if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;
5086
5087 // Then, evaluate the AddRec.
5088 return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
5089 }
5090
5091 return AddRec;
5092 }
5093
5094 if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
5095 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5096 if (Op == Cast->getOperand())
5097 return Cast; // must be loop invariant
5098 return getZeroExtendExpr(Op, Cast->getType());
5099 }
5100
5101 if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
5102 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5103 if (Op == Cast->getOperand())
5104 return Cast; // must be loop invariant
5105 return getSignExtendExpr(Op, Cast->getType());
5106 }
5107
5108 if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
5109 const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
5110 if (Op == Cast->getOperand())
5111 return Cast; // must be loop invariant
5112 return getTruncateExpr(Op, Cast->getType());
5113 }
5114
5115 llvm_unreachable("Unknown SCEV type!");
5116 return 0;
5117 }
5118
5119 /// getSCEVAtScope - This is a convenience function which does
5120 /// getSCEVAtScope(getSCEV(V), L).
5121 const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
5122 return getSCEVAtScope(getSCEV(V), L);
5123 }
5124
5125 /// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
5126 /// following equation:
5127 ///
5128 /// A * X = B (mod N)
5129 ///
5130 /// where N = 2^BW and BW is the common bit width of A and B. The signedness of
5131 /// A and B isn't important.
5132 ///
5133 /// If the equation does not have a solution, SCEVCouldNotCompute is returned.
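///
/// Worked example (illustrative numbers): with BW = 4 (so N = 16), A = 6 and
/// B = 10: D = gcd(6, 16) = 2, and B is divisible by D. Then A/D = 3, whose
/// multiplicative inverse modulo N/D = 8 is 3, so X = (3 * (10/2)) mod 8 = 7.
/// Check: 6 * 7 = 42 = 2*16 + 10, i.e. 6*7 == 10 (mod 16).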
5134 static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
5135 ScalarEvolution &SE) {
5136 uint32_t BW = A.getBitWidth();
5137 assert(BW == B.getBitWidth() && "Bit widths must be the same.");
5138 assert(A != 0 && "A must be non-zero.");
5139
5140 // 1. D = gcd(A, N)
5141 //
5142 // The gcd of A and N may have only one prime factor: 2. The number of
5143 // trailing zeros in A is its multiplicity
5144 uint32_t Mult2 = A.countTrailingZeros();
5145 // D = 2^Mult2
5146
5147 // 2. Check if B is divisible by D.
5148 //
5149 // B is divisible by D if and only if the multiplicity of prime factor 2 for B
5150 // is not less than multiplicity of this prime factor for D.
5151 if (B.countTrailingZeros() < Mult2)
5152 return SE.getCouldNotCompute();
5153
5154 // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
5155 // modulo (N / D).
5156 //
5157 // (N / D) may need BW+1 bits in its representation. Hence, we'll use this
5158 // bit width during computations.
5159 APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
5160 APInt Mod(BW + 1, 0);
5161 Mod.setBit(BW - Mult2); // Mod = N / D
5162 APInt I = AD.multiplicativeInverse(Mod);
5163
5164 // 4. Compute the minimum unsigned root of the equation:
5165 // I * (B / D) mod (N / D)
5166 APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);
5167
5168 // The result is guaranteed to be less than 2^BW so we may truncate it to BW
5169 // bits.
5170 return SE.getConstant(Result.trunc(BW));
5171 }
5172
5173 /// SolveQuadraticEquation - Find the roots of the quadratic equation for the
5174 /// given quadratic chrec {L,+,M,+,N}. This returns either the two roots (which
5175 /// might be the same) or two SCEVCouldNotCompute objects.
5176 ///
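/// Worked example (illustrative): the chrec {-4,+,1,+,2} evaluates at
/// iteration x to -4 + 1*x + 2*(x*(x-1)/2) = x^2 - 4, so the coefficients
/// below are A = N/2 = 1, B = M - N/2 = 0, C = L = -4, and the two roots
/// returned are x = 2 and x = -2.
///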
5177 static std::pair<const SCEV *,const SCEV *>
5178 SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
5179 assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
5180 const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
5181 const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
5182 const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));
5183
5184 // We currently can only solve this if the coefficients are constants.
5185 if (!LC || !MC || !NC) {
5186 const SCEV *CNC = SE.getCouldNotCompute();
5187 return std::make_pair(CNC, CNC);
5188 }
5189
5190 uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
5191 const APInt &L = LC->getValue()->getValue();
5192 const APInt &M = MC->getValue()->getValue();
5193 const APInt &N = NC->getValue()->getValue();
5194 APInt Two(BitWidth, 2);
5195 APInt Four(BitWidth, 4);
5196
5197 {
5198 using namespace APIntOps;
5199 const APInt& C = L;
5200 // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
5201 // The B coefficient is M-N/2
5202 APInt B(M);
5203 B -= sdiv(N,Two);
5204
5205 // The A coefficient is N/2
5206 APInt A(N.sdiv(Two));
5207
5208 // Compute the B^2-4ac term.
5209 APInt SqrtTerm(B);
5210 SqrtTerm *= B;
5211 SqrtTerm -= Four * (A * C);
5212
5213 // Compute sqrt(B^2-4ac). This is guaranteed to be the nearest
5214 // integer value or else APInt::sqrt() will assert.
5215 APInt SqrtVal(SqrtTerm.sqrt());
5216
5217 // Compute the two solutions for the quadratic formula.
5218 // The divisions must be performed as signed divisions.
5219 APInt NegB(-B);
5220 APInt TwoA(A << 1);
5221 if (TwoA.isMinValue()) {
5222 const SCEV *CNC = SE.getCouldNotCompute();
5223 return std::make_pair(CNC, CNC);
5224 }
5225
5226 LLVMContext &Context = SE.getContext();
5227
5228 ConstantInt *Solution1 =
5229 ConstantInt::get(Context, (NegB + SqrtVal).sdiv(TwoA));
5230 ConstantInt *Solution2 =
5231 ConstantInt::get(Context, (NegB - SqrtVal).sdiv(TwoA));
5232
5233 return std::make_pair(SE.getConstant(Solution1),
5234 SE.getConstant(Solution2));
5235 } // end APIntOps namespace
5236 }
5237
5238 /// HowFarToZero - Return the number of times a backedge comparing the specified
5239 /// value to zero will execute. If not computable, return CouldNotCompute.
5240 ///
5241 /// This is only used for loops with a "x != y" exit test. The exit condition is
5242 /// now expressed as a single expression, V = x-y. So the exit test is
5243 /// effectively V != 0. We know and take advantage of the fact that this
5244 /// expression is only used in a comparison-with-zero context.
5245 ScalarEvolution::ExitLimit
5246 ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
5247 // If the value is a constant
5248 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5249 // If the value is already zero, the branch will execute zero times.
5250 if (C->getValue()->isZero()) return C;
5251 return getCouldNotCompute(); // Otherwise it will loop infinitely.
5252 }
5253
5254 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
5255 if (!AddRec || AddRec->getLoop() != L)
5256 return getCouldNotCompute();
5257
5258 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
5259 // the quadratic equation to solve it.
5260 if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
5261 std::pair<const SCEV *,const SCEV *> Roots =
5262 SolveQuadraticEquation(AddRec, *this);
5263 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
5264 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
5265 if (R1 && R2) {
5266 #if 0
5267 dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
5268 << " sol#2: " << *R2 << "\n";
5269 #endif
5270 // Pick the smallest positive root value.
5271 if (ConstantInt *CB =
5272 dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
5273 R1->getValue(),
5274 R2->getValue()))) {
5275 if (CB->getZExtValue() == false)
5276 std::swap(R1, R2); // R1 is the minimum root now.
5277
5278 // We can only use this value if the chrec ends up with an exact zero
5279 // value at this index. When solving for "X*X != 5", for example, we
5280 // should not accept a root of 2.
5281 const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
5282 if (Val->isZero())
5283 return R1; // We found a quadratic root!
5284 }
5285 }
5286 return getCouldNotCompute();
5287 }
5288
5289 // Otherwise we can only handle this if it is affine.
5290 if (!AddRec->isAffine())
5291 return getCouldNotCompute();
5292
5293 // If this is an affine expression, the execution count of this branch is
5294 // the minimum unsigned root of the following equation:
5295 //
5296 // Start + Step*N = 0 (mod 2^BW)
5297 //
5298 // equivalent to:
5299 //
5300 // Step*N = -Start (mod 2^BW)
5301 //
5302 // where BW is the common bit width of Start and Step.
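  //
  // Illustrative instance (hypothetical values): with BW = 8, Start = 10 and
  // Step = -2, the equation 10 - 2*N == 0 (mod 256) has minimum unsigned
  // solution N = 5, matching the sequence 10, 8, 6, 4, 2, 0.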
5303
5304 // Get the initial value for the loop.
5305 const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
5306 const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
5307
5308 // For now we handle only constant steps.
5309 //
5310 // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
5311 // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
5312 // to 0, it must be counting down to equal 0. Consequently, N = Start / -Step.
5313 // We have not yet seen any such cases.
5314 const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
5315 if (StepC == 0)
5316 return getCouldNotCompute();
5317
5318 // For positive steps (counting up until unsigned overflow):
5319 // N = -Start/Step (as unsigned)
5320 // For negative steps (counting down to zero):
5321 // N = Start/-Step
5322 // First compute the unsigned distance from zero in the direction of Step.
5323 bool CountDown = StepC->getValue()->getValue().isNegative();
5324 const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
5325
5326 // Handle unitary steps, which cannot wraparound.
5327 // 1*N = -Start; -1*N = Start (mod 2^BW), so:
5328 // N = Distance (as unsigned)
5329 if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue()) {
5330 ConstantRange CR = getUnsignedRange(Start);
5331 const SCEV *MaxBECount;
5332 if (!CountDown && CR.getUnsignedMin().isMinValue())
5333 // When counting up, the worst starting value is 1, not 0.
5334 MaxBECount = CR.getUnsignedMax().isMinValue()
5335 ? getConstant(APInt::getMinValue(CR.getBitWidth()))
5336 : getConstant(APInt::getMaxValue(CR.getBitWidth()));
5337 else
5338 MaxBECount = getConstant(CountDown ? CR.getUnsignedMax()
5339 : -CR.getUnsignedMin());
5340 return ExitLimit(Distance, MaxBECount);
5341 }
5342
5343 // If the recurrence is known not to wraparound, unsigned divide computes the
5344 // back edge count. We know that the value will either become zero (and thus
5345 // the loop terminates), that the loop will terminate through some other exit
5346 // condition first, or that the loop has undefined behavior. This means
5347 // we can't "miss" the exit value, even with nonunit stride.
5348 //
5349 // FIXME: Prove that loops always exhibit *acceptable* undefined
5350 // behavior. Loops must exhibit defined behavior until a wrapped value is
5351 // actually used. So the trip count computed by udiv could be smaller than the
5352 // number of well-defined iterations.
5353 if (AddRec->getNoWrapFlags(SCEV::FlagNW))
5354 // FIXME: We really want an "isexact" bit for udiv.
5355 return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
5356
5357 // Then, try to solve the above equation provided that Start is constant.
5358 if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
5359 return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
5360 -StartC->getValue()->getValue(),
5361 *this);
5362 return getCouldNotCompute();
5363 }
5364
5365 /// HowFarToNonZero - Return the number of times a backedge checking the
5366 /// specified value for nonzero will execute. If not computable, return
5367 /// CouldNotCompute
5368 ScalarEvolution::ExitLimit
5369 ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
5370 // Loops that look like: while (X == 0) are very strange indeed. We don't
5371 // handle them yet except for the trivial case. This could be expanded in the
5372 // future as needed.
5373
5374 // If the value is a constant, check to see if it is known to be non-zero
5375 // already. If so, the backedge will execute zero times.
5376 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
5377 if (!C->getValue()->isNullValue())
5378 return getConstant(C->getType(), 0);
5379 return getCouldNotCompute(); // Otherwise it will loop infinitely.
5380 }
5381
5382 // We could implement others, but I really doubt anyone writes loops like
5383 // this, and if they did, they would already be constant folded.
5384 return getCouldNotCompute();
5385 }
5386
5387 /// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
5388 /// (which may not be an immediate predecessor) which has exactly one
5389 /// successor from which BB is reachable, or null if no such block is
5390 /// found.
5391 ///
5392 std::pair<BasicBlock *, BasicBlock *>
5393 ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
5394 // If the block has a unique predecessor, then there is no path from the
5395 // predecessor to the block that does not go through the direct edge
5396 // from the predecessor to the block.
5397 if (BasicBlock *Pred = BB->getSinglePredecessor())
5398 return std::make_pair(Pred, BB);
5399
5400 // A loop's header is defined to be a block that dominates the loop.
5401 // If the header has a unique predecessor outside the loop, it must be
5402 // a block that has exactly one successor that can reach the loop.
5403 if (Loop *L = LI->getLoopFor(BB))
5404 return std::make_pair(L->getLoopPredecessor(), L->getHeader());
5405
5406 return std::pair<BasicBlock *, BasicBlock *>();
5407 }
5408
5409 /// HasSameValue - SCEV structural equivalence is usually sufficient for
5410 /// testing whether two expressions are equal, however for the purposes of
5411 /// looking for a condition guarding a loop, it can be useful to be a little
5412 /// more general, since a front-end may have replicated the controlling
5413 /// expression.
5414 ///
5415 static bool HasSameValue(const SCEV *A, const SCEV *B) {
5416 // Quick check to see if they are the same SCEV.
5417 if (A == B) return true;
5418
5419 // Otherwise, if they're both SCEVUnknown, it's possible that they hold
5420 // two different instructions with the same value. Check for this case.
5421 if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
5422 if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
5423 if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
5424 if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
5425 if (AI->isIdenticalTo(BI) && !AI->mayReadFromMemory())
5426 return true;
5427
5428 // Otherwise assume they may have a different value.
5429 return false;
5430 }
5431
5432 /// SimplifyICmpOperands - Simplify LHS and RHS in a comparison with
5433 /// predicate Pred. Return true iff any changes were made.
5434 ///
5435 bool ScalarEvolution::SimplifyICmpOperands(ICmpInst::Predicate &Pred,
5436 const SCEV *&LHS, const SCEV *&RHS) {
5437 bool Changed = false;
5438
5439 // Canonicalize a constant to the right side.
5440 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
5441 // Check for both operands constant.
5442 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
5443 if (ConstantExpr::getICmp(Pred,
5444 LHSC->getValue(),
5445 RHSC->getValue())->isNullValue())
5446 goto trivially_false;
5447 else
5448 goto trivially_true;
5449 }
5450 // Otherwise swap the operands to put the constant on the right.
5451 std::swap(LHS, RHS);
5452 Pred = ICmpInst::getSwappedPredicate(Pred);
5453 Changed = true;
5454 }
5455
5456 // If we're comparing an addrec with a value which is loop-invariant in the
5457 // addrec's loop, put the addrec on the left. Also make a dominance check,
5458 // as both operands could be addrecs loop-invariant in each other's loop.
5459 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
5460 const Loop *L = AR->getLoop();
5461 if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
5462 std::swap(LHS, RHS);
5463 Pred = ICmpInst::getSwappedPredicate(Pred);
5464 Changed = true;
5465 }
5466 }
5467
5468 // If there's a constant operand, canonicalize comparisons with boundary
5469 // cases, and canonicalize *-or-equal comparisons to regular comparisons.
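  //
  // For example (assuming an i8-typed RHS): "x uge 1" becomes "x ne 0" via
  // the boundary case below, and "x ule 7" becomes "x ult 8" via the
  // *-or-equal rewrite.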
5470 if (const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS)) {
5471 const APInt &RA = RC->getValue()->getValue();
5472 switch (Pred) {
5473 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5474 case ICmpInst::ICMP_EQ:
5475 case ICmpInst::ICMP_NE:
5476 break;
5477 case ICmpInst::ICMP_UGE:
5478 if ((RA - 1).isMinValue()) {
5479 Pred = ICmpInst::ICMP_NE;
5480 RHS = getConstant(RA - 1);
5481 Changed = true;
5482 break;
5483 }
5484 if (RA.isMaxValue()) {
5485 Pred = ICmpInst::ICMP_EQ;
5486 Changed = true;
5487 break;
5488 }
5489 if (RA.isMinValue()) goto trivially_true;
5490
5491 Pred = ICmpInst::ICMP_UGT;
5492 RHS = getConstant(RA - 1);
5493 Changed = true;
5494 break;
5495 case ICmpInst::ICMP_ULE:
5496 if ((RA + 1).isMaxValue()) {
5497 Pred = ICmpInst::ICMP_NE;
5498 RHS = getConstant(RA + 1);
5499 Changed = true;
5500 break;
5501 }
5502 if (RA.isMinValue()) {
5503 Pred = ICmpInst::ICMP_EQ;
5504 Changed = true;
5505 break;
5506 }
5507 if (RA.isMaxValue()) goto trivially_true;
5508
5509 Pred = ICmpInst::ICMP_ULT;
5510 RHS = getConstant(RA + 1);
5511 Changed = true;
5512 break;
5513 case ICmpInst::ICMP_SGE:
5514 if ((RA - 1).isMinSignedValue()) {
5515 Pred = ICmpInst::ICMP_NE;
5516 RHS = getConstant(RA - 1);
5517 Changed = true;
5518 break;
5519 }
5520 if (RA.isMaxSignedValue()) {
5521 Pred = ICmpInst::ICMP_EQ;
5522 Changed = true;
5523 break;
5524 }
5525 if (RA.isMinSignedValue()) goto trivially_true;
5526
5527 Pred = ICmpInst::ICMP_SGT;
5528 RHS = getConstant(RA - 1);
5529 Changed = true;
5530 break;
5531 case ICmpInst::ICMP_SLE:
5532 if ((RA + 1).isMaxSignedValue()) {
5533 Pred = ICmpInst::ICMP_NE;
5534 RHS = getConstant(RA + 1);
5535 Changed = true;
5536 break;
5537 }
5538 if (RA.isMinSignedValue()) {
5539 Pred = ICmpInst::ICMP_EQ;
5540 Changed = true;
5541 break;
5542 }
5543 if (RA.isMaxSignedValue()) goto trivially_true;
5544
5545 Pred = ICmpInst::ICMP_SLT;
5546 RHS = getConstant(RA + 1);
5547 Changed = true;
5548 break;
5549 case ICmpInst::ICMP_UGT:
5550 if (RA.isMinValue()) {
5551 Pred = ICmpInst::ICMP_NE;
5552 Changed = true;
5553 break;
5554 }
5555 if ((RA + 1).isMaxValue()) {
5556 Pred = ICmpInst::ICMP_EQ;
5557 RHS = getConstant(RA + 1);
5558 Changed = true;
5559 break;
5560 }
5561 if (RA.isMaxValue()) goto trivially_false;
5562 break;
5563 case ICmpInst::ICMP_ULT:
5564 if (RA.isMaxValue()) {
5565 Pred = ICmpInst::ICMP_NE;
5566 Changed = true;
5567 break;
5568 }
5569 if ((RA - 1).isMinValue()) {
5570 Pred = ICmpInst::ICMP_EQ;
5571 RHS = getConstant(RA - 1);
5572 Changed = true;
5573 break;
5574 }
5575 if (RA.isMinValue()) goto trivially_false;
5576 break;
5577 case ICmpInst::ICMP_SGT:
5578 if (RA.isMinSignedValue()) {
5579 Pred = ICmpInst::ICMP_NE;
5580 Changed = true;
5581 break;
5582 }
5583 if ((RA + 1).isMaxSignedValue()) {
5584 Pred = ICmpInst::ICMP_EQ;
5585 RHS = getConstant(RA + 1);
5586 Changed = true;
5587 break;
5588 }
5589 if (RA.isMaxSignedValue()) goto trivially_false;
5590 break;
5591 case ICmpInst::ICMP_SLT:
5592 if (RA.isMaxSignedValue()) {
5593 Pred = ICmpInst::ICMP_NE;
5594 Changed = true;
5595 break;
5596 }
5597 if ((RA - 1).isMinSignedValue()) {
5598 Pred = ICmpInst::ICMP_EQ;
5599 RHS = getConstant(RA - 1);
5600 Changed = true;
5601 break;
5602 }
5603 if (RA.isMinSignedValue()) goto trivially_false;
5604 break;
5605 }
5606 }
5607
5608 // Check for obvious equality.
5609 if (HasSameValue(LHS, RHS)) {
5610 if (ICmpInst::isTrueWhenEqual(Pred))
5611 goto trivially_true;
5612 if (ICmpInst::isFalseWhenEqual(Pred))
5613 goto trivially_false;
5614 }
5615
5616 // If possible, canonicalize GE/LE comparisons to GT/LT comparisons, by
5617 // adding or subtracting 1 from one of the operands.
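  // For example (illustrative): when RHS+1 cannot overflow, "LHS s<= RHS"
  // becomes "LHS s< RHS+1"; otherwise, if LHS-1 cannot underflow, it becomes
  // "LHS-1 s< RHS" instead.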
5618 switch (Pred) {
5619 case ICmpInst::ICMP_SLE:
5620 if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
5621 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5622 SCEV::FlagNSW);
5623 Pred = ICmpInst::ICMP_SLT;
5624 Changed = true;
5625 } else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
5626 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5627 SCEV::FlagNSW);
5628 Pred = ICmpInst::ICMP_SLT;
5629 Changed = true;
5630 }
5631 break;
5632 case ICmpInst::ICMP_SGE:
5633 if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
5634 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5635 SCEV::FlagNSW);
5636 Pred = ICmpInst::ICMP_SGT;
5637 Changed = true;
5638 } else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
5639 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5640 SCEV::FlagNSW);
5641 Pred = ICmpInst::ICMP_SGT;
5642 Changed = true;
5643 }
5644 break;
5645 case ICmpInst::ICMP_ULE:
5646 if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
5647 RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
5648 SCEV::FlagNUW);
5649 Pred = ICmpInst::ICMP_ULT;
5650 Changed = true;
5651 } else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
5652 LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
5653 SCEV::FlagNUW);
5654 Pred = ICmpInst::ICMP_ULT;
5655 Changed = true;
5656 }
5657 break;
5658 case ICmpInst::ICMP_UGE:
5659 if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
5660 RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
5661 SCEV::FlagNUW);
5662 Pred = ICmpInst::ICMP_UGT;
5663 Changed = true;
5664 } else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
5665 LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
5666 SCEV::FlagNUW);
5667 Pred = ICmpInst::ICMP_UGT;
5668 Changed = true;
5669 }
5670 break;
5671 default:
5672 break;
5673 }
5674
5675 // TODO: More simplifications are possible here.
5676
5677 return Changed;
5678
5679 trivially_true:
5680 // Return 0 == 0.
5681 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
5682 Pred = ICmpInst::ICMP_EQ;
5683 return true;
5684
5685 trivially_false:
5686 // Return 0 != 0.
5687 LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
5688 Pred = ICmpInst::ICMP_NE;
5689 return true;
5690 }
5691
5692 bool ScalarEvolution::isKnownNegative(const SCEV *S) {
5693 return getSignedRange(S).getSignedMax().isNegative();
5694 }
5695
5696 bool ScalarEvolution::isKnownPositive(const SCEV *S) {
5697 return getSignedRange(S).getSignedMin().isStrictlyPositive();
5698 }
5699
5700 bool ScalarEvolution::isKnownNonNegative(const SCEV *S) {
5701 return !getSignedRange(S).getSignedMin().isNegative();
5702 }
5703
5704 bool ScalarEvolution::isKnownNonPositive(const SCEV *S) {
5705 return !getSignedRange(S).getSignedMax().isStrictlyPositive();
5706 }
5707
5708 bool ScalarEvolution::isKnownNonZero(const SCEV *S) {
5709 return isKnownNegative(S) || isKnownPositive(S);
5710 }
5711
5712 bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
5713 const SCEV *LHS, const SCEV *RHS) {
5714 // Canonicalize the inputs first.
5715 (void)SimplifyICmpOperands(Pred, LHS, RHS);
5716
5717 // If LHS or RHS is an addrec, check to see if the condition is true in
5718 // every iteration of the loop.
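  // For example (illustrative): {0,+,1}<L> s< N holds on every iteration of L
  // if the loop entry is guarded by "0 s< N" and the backedge is guarded by
  // "{1,+,1}<L> s< N" (the post-increment value).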
5719 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
5720 if (isLoopEntryGuardedByCond(
5721 AR->getLoop(), Pred, AR->getStart(), RHS) &&
5722 isLoopBackedgeGuardedByCond(
5723 AR->getLoop(), Pred, AR->getPostIncExpr(*this), RHS))
5724 return true;
5725 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
5726 if (isLoopEntryGuardedByCond(
5727 AR->getLoop(), Pred, LHS, AR->getStart()) &&
5728 isLoopBackedgeGuardedByCond(
5729 AR->getLoop(), Pred, LHS, AR->getPostIncExpr(*this)))
5730 return true;
5731
5732 // Otherwise see what can be done with known constant ranges.
5733 return isKnownPredicateWithRanges(Pred, LHS, RHS);
5734 }
5735
5736 bool
5737 ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
5738 const SCEV *LHS, const SCEV *RHS) {
5739 if (HasSameValue(LHS, RHS))
5740 return ICmpInst::isTrueWhenEqual(Pred);
5741
5742 // This code is split out from isKnownPredicate because it is called from
5743 // within isLoopEntryGuardedByCond.
5744 switch (Pred) {
5745 default:
5746 llvm_unreachable("Unexpected ICmpInst::Predicate value!");
5747 break;
5748 case ICmpInst::ICMP_SGT:
5749 Pred = ICmpInst::ICMP_SLT;
5750 std::swap(LHS, RHS);
5751 case ICmpInst::ICMP_SLT: {
5752 ConstantRange LHSRange = getSignedRange(LHS);
5753 ConstantRange RHSRange = getSignedRange(RHS);
5754 if (LHSRange.getSignedMax().slt(RHSRange.getSignedMin()))
5755 return true;
5756 if (LHSRange.getSignedMin().sge(RHSRange.getSignedMax()))
5757 return false;
5758 break;
5759 }
5760 case ICmpInst::ICMP_SGE:
5761 Pred = ICmpInst::ICMP_SLE;
5762 std::swap(LHS, RHS);
5763 case ICmpInst::ICMP_SLE: {
5764 ConstantRange LHSRange = getSignedRange(LHS);
5765 ConstantRange RHSRange = getSignedRange(RHS);
5766 if (LHSRange.getSignedMax().sle(RHSRange.getSignedMin()))
5767 return true;
5768 if (LHSRange.getSignedMin().sgt(RHSRange.getSignedMax()))
5769 return false;
5770 break;
5771 }
5772 case ICmpInst::ICMP_UGT:
5773 Pred = ICmpInst::ICMP_ULT;
5774 std::swap(LHS, RHS);
5775 case ICmpInst::ICMP_ULT: {
5776 ConstantRange LHSRange = getUnsignedRange(LHS);
5777 ConstantRange RHSRange = getUnsignedRange(RHS);
5778 if (LHSRange.getUnsignedMax().ult(RHSRange.getUnsignedMin()))
5779 return true;
5780 if (LHSRange.getUnsignedMin().uge(RHSRange.getUnsignedMax()))
5781 return false;
5782 break;
5783 }
5784 case ICmpInst::ICMP_UGE:
5785 Pred = ICmpInst::ICMP_ULE;
5786 std::swap(LHS, RHS);
5787 case ICmpInst::ICMP_ULE: {
5788 ConstantRange LHSRange = getUnsignedRange(LHS);
5789 ConstantRange RHSRange = getUnsignedRange(RHS);
5790 if (LHSRange.getUnsignedMax().ule(RHSRange.getUnsignedMin()))
5791 return true;
5792 if (LHSRange.getUnsignedMin().ugt(RHSRange.getUnsignedMax()))
5793 return false;
5794 break;
5795 }
5796 case ICmpInst::ICMP_NE: {
5797 if (getUnsignedRange(LHS).intersectWith(getUnsignedRange(RHS)).isEmptySet())
5798 return true;
5799 if (getSignedRange(LHS).intersectWith(getSignedRange(RHS)).isEmptySet())
5800 return true;
5801
5802 const SCEV *Diff = getMinusSCEV(LHS, RHS);
5803 if (isKnownNonZero(Diff))
5804 return true;
5805 break;
5806 }
5807 case ICmpInst::ICMP_EQ:
5808 // The check at the top of the function catches the case where
5809 // the values are known to be equal.
5810 break;
5811 }
5812 return false;
5813 }
5814
5815 /// isLoopBackedgeGuardedByCond - Test whether the backedge of the loop is
5816 /// protected by a conditional between LHS and RHS. This is used to
5817 /// eliminate casts.
5818 bool
5819 ScalarEvolution::isLoopBackedgeGuardedByCond(const Loop *L,
5820 ICmpInst::Predicate Pred,
5821 const SCEV *LHS, const SCEV *RHS) {
5822 // Interpret a null as meaning no loop, where there is obviously no guard
5823 // (interprocedural conditions notwithstanding).
5824 if (!L) return true;
5825
5826 BasicBlock *Latch = L->getLoopLatch();
5827 if (!Latch)
5828 return false;
5829
5830 BranchInst *LoopContinuePredicate =
5831 dyn_cast<BranchInst>(Latch->getTerminator());
5832 if (!LoopContinuePredicate ||
5833 LoopContinuePredicate->isUnconditional())
5834 return false;
5835
5836 return isImpliedCond(Pred, LHS, RHS,
5837 LoopContinuePredicate->getCondition(),
5838 LoopContinuePredicate->getSuccessor(0) != L->getHeader());
5839 }
5840
5841 /// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
5842 /// by a conditional between LHS and RHS. This is used to help avoid max
5843 /// expressions in loop trip counts, and to eliminate casts.
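/// For example (illustrative), in
///   if (n > 0)
///     for (i = 0; i != n; ++i) ...
/// the guard "n > 0" dominates the loop's entry, so queries about the loop
/// may assume "n > 0" holds on entry.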
5844 bool
5845 ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
5846 ICmpInst::Predicate Pred,
5847 const SCEV *LHS, const SCEV *RHS) {
5848 // Interpret a null as meaning no loop, where there is obviously no guard
5849 // (interprocedural conditions notwithstanding).
5850 if (!L) return false;
5851
5852 // Starting at the loop predecessor, climb up the predecessor chain, as long
5853 // as there are predecessors that can be found that have unique successors
5854 // leading to the original header.
5855 for (std::pair<BasicBlock *, BasicBlock *>
5856 Pair(L->getLoopPredecessor(), L->getHeader());
5857 Pair.first;
5858 Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
5859
5860 BranchInst *LoopEntryPredicate =
5861 dyn_cast<BranchInst>(Pair.first->getTerminator());
5862 if (!LoopEntryPredicate ||
5863 LoopEntryPredicate->isUnconditional())
5864 continue;
5865
5866 if (isImpliedCond(Pred, LHS, RHS,
5867 LoopEntryPredicate->getCondition(),
5868 LoopEntryPredicate->getSuccessor(0) != Pair.second))
5869 return true;
5870 }
5871
5872 return false;
5873 }
5874
5875 /// isImpliedCond - Test whether the condition described by Pred, LHS,
5876 /// and RHS is true whenever the given Cond value evaluates to true.
5877 bool ScalarEvolution::isImpliedCond(ICmpInst::Predicate Pred,
5878 const SCEV *LHS, const SCEV *RHS,
5879 Value *FoundCondValue,
5880 bool Inverse) {
5881 // Recursively handle And and Or conditions.
5882 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FoundCondValue)) {
5883 if (BO->getOpcode() == Instruction::And) {
5884 if (!Inverse)
5885 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5886 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5887 } else if (BO->getOpcode() == Instruction::Or) {
5888 if (Inverse)
5889 return isImpliedCond(Pred, LHS, RHS, BO->getOperand(0), Inverse) ||
5890 isImpliedCond(Pred, LHS, RHS, BO->getOperand(1), Inverse);
5891 }
5892 }
5893
5894 ICmpInst *ICI = dyn_cast<ICmpInst>(FoundCondValue);
5895 if (!ICI) return false;
5896
5897 // Bail if the ICmp's operands' types are wider than the needed type
5898 // before attempting to call getSCEV on them. This avoids infinite
5899 // recursion, since the analysis of widening casts can require loop
5900 // exit condition information for overflow checking, which would
5901 // lead back here.
5902 if (getTypeSizeInBits(LHS->getType()) <
5903 getTypeSizeInBits(ICI->getOperand(0)->getType()))
5904 return false;
5905
5906 // Now that we found a conditional branch that dominates the loop, check to
5907 // see if it is the comparison we are looking for.
5908 ICmpInst::Predicate FoundPred;
5909 if (Inverse)
5910 FoundPred = ICI->getInversePredicate();
5911 else
5912 FoundPred = ICI->getPredicate();
5913
5914 const SCEV *FoundLHS = getSCEV(ICI->getOperand(0));
5915 const SCEV *FoundRHS = getSCEV(ICI->getOperand(1));
5916
5917 // Balance the types. The case where FoundLHS' type is wider than
5918 // LHS' type is checked for above.
5919 if (getTypeSizeInBits(LHS->getType()) >
5920 getTypeSizeInBits(FoundLHS->getType())) {
5921 if (CmpInst::isSigned(Pred)) {
5922 FoundLHS = getSignExtendExpr(FoundLHS, LHS->getType());
5923 FoundRHS = getSignExtendExpr(FoundRHS, LHS->getType());
5924 } else {
5925 FoundLHS = getZeroExtendExpr(FoundLHS, LHS->getType());
5926 FoundRHS = getZeroExtendExpr(FoundRHS, LHS->getType());
5927 }
5928 }
5929
5930 // Canonicalize the query to match the way instcombine will have
5931 // canonicalized the comparison.
5932 if (SimplifyICmpOperands(Pred, LHS, RHS))
5933 if (LHS == RHS)
5934 return CmpInst::isTrueWhenEqual(Pred);
5935 if (SimplifyICmpOperands(FoundPred, FoundLHS, FoundRHS))
5936 if (FoundLHS == FoundRHS)
5937 return CmpInst::isFalseWhenEqual(Pred);
5938
5939 // Check to see if we can make the LHS or RHS match.
5940 if (LHS == FoundRHS || RHS == FoundLHS) {
5941 if (isa<SCEVConstant>(RHS)) {
5942 std::swap(FoundLHS, FoundRHS);
5943 FoundPred = ICmpInst::getSwappedPredicate(FoundPred);
5944 } else {
5945 std::swap(LHS, RHS);
5946 Pred = ICmpInst::getSwappedPredicate(Pred);
5947 }
5948 }
5949
5950 // Check whether the found predicate is the same as the desired predicate.
5951 if (FoundPred == Pred)
5952 return isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS);
5953
5954 // Check whether swapping the found predicate makes it the same as the
5955 // desired predicate.
5956 if (ICmpInst::getSwappedPredicate(FoundPred) == Pred) {
5957 if (isa<SCEVConstant>(RHS))
5958 return isImpliedCondOperands(Pred, LHS, RHS, FoundRHS, FoundLHS);
5959 else
5960 return isImpliedCondOperands(ICmpInst::getSwappedPredicate(Pred),
5961 RHS, LHS, FoundLHS, FoundRHS);
5962 }
5963
5964 // Check whether the actual condition is beyond sufficient.
5965 if (FoundPred == ICmpInst::ICMP_EQ)
5966 if (ICmpInst::isTrueWhenEqual(Pred))
5967 if (isImpliedCondOperands(Pred, LHS, RHS, FoundLHS, FoundRHS))
5968 return true;
5969 if (Pred == ICmpInst::ICMP_NE)
5970 if (!ICmpInst::isTrueWhenEqual(FoundPred))
5971 if (isImpliedCondOperands(FoundPred, LHS, RHS, FoundLHS, FoundRHS))
5972 return true;
5973
5974 // Otherwise assume the worst.
5975 return false;
5976 }
5977
5978 /// isImpliedCondOperands - Test whether the condition described by Pred,
5979 /// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
5980 /// and FoundRHS is true.
5981 bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
5982 const SCEV *LHS, const SCEV *RHS,
5983 const SCEV *FoundLHS,
5984 const SCEV *FoundRHS) {
5985 return isImpliedCondOperandsHelper(Pred, LHS, RHS,
5986 FoundLHS, FoundRHS) ||
5987 // ~x < ~y --> x > y
5988 isImpliedCondOperandsHelper(Pred, LHS, RHS,
5989 getNotSCEV(FoundRHS),
5990 getNotSCEV(FoundLHS));
5991 }
5992
5993 /// isImpliedCondOperandsHelper - Test whether the condition described by
5994 /// Pred, LHS, and RHS is true whenever the condition described by Pred,
5995 /// FoundLHS, and FoundRHS is true.
5996 bool
5997 ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
5998 const SCEV *LHS, const SCEV *RHS,
5999 const SCEV *FoundLHS,
6000 const SCEV *FoundRHS) {
6001 switch (Pred) {
6002 default: llvm_unreachable("Unexpected ICmpInst::Predicate value!");
6003 case ICmpInst::ICMP_EQ:
6004 case ICmpInst::ICMP_NE:
6005 if (HasSameValue(LHS, FoundLHS) && HasSameValue(RHS, FoundRHS))
6006 return true;
6007 break;
6008 case ICmpInst::ICMP_SLT:
6009 case ICmpInst::ICMP_SLE:
6010 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
6011 isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
6012 return true;
6013 break;
6014 case ICmpInst::ICMP_SGT:
6015 case ICmpInst::ICMP_SGE:
6016 if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
6017 isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
6018 return true;
6019 break;
6020 case ICmpInst::ICMP_ULT:
6021 case ICmpInst::ICMP_ULE:
6022 if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
6023 isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
6024 return true;
6025 break;
6026 case ICmpInst::ICMP_UGT:
6027 case ICmpInst::ICMP_UGE:
6028 if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
6029 isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
6030 return true;
6031 break;
6032 }
6033
6034 return false;
6035 }
6036
6037 /// getBECount - Subtract the end and start values and divide by the step,
6038 /// rounding up, to get the number of times the backedge is executed. Return
6039 /// CouldNotCompute if an intermediate computation overflows.
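/// For example (illustrative), Start = 0, End = 10, Step = 3 gives
/// (10 - 0 + (3 - 1)) udiv 3 = 4, i.e. (End - Start) / Step rounded up.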
6040 const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
6041 const SCEV *End,
6042 const SCEV *Step,
6043 bool NoWrap) {
6044 assert(!isKnownNegative(Step) &&
6045 "This code doesn't handle negative strides yet!");
6046
6047 Type *Ty = Start->getType();
6048
6049 // When Start == End, we have an exact BECount == 0. Short-circuit this case
6050 // here because SCEV may not be able to determine that the unsigned division
6051 // after rounding is zero.
6052 if (Start == End)
6053 return getConstant(Ty, 0);
6054
6055 const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
6056 const SCEV *Diff = getMinusSCEV(End, Start);
6057 const SCEV *RoundUp = getAddExpr(Step, NegOne);
6058
6059 // Add an adjustment to the difference between End and Start so that
6060 // the division will effectively round up.
6061 const SCEV *Add = getAddExpr(Diff, RoundUp);
6062
6063 if (!NoWrap) {
6064 // Check Add for unsigned overflow.
6065 // TODO: More sophisticated things could be done here.
6066 Type *WideTy = IntegerType::get(getContext(),
6067 getTypeSizeInBits(Ty) + 1);
6068 const SCEV *EDiff = getZeroExtendExpr(Diff, WideTy);
6069 const SCEV *ERoundUp = getZeroExtendExpr(RoundUp, WideTy);
6070 const SCEV *OperandExtendedAdd = getAddExpr(EDiff, ERoundUp);
6071 if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
6072 return getCouldNotCompute();
6073 }
6074
6075 return getUDivExpr(Add, Step);
6076 }
6077
6078 /// HowManyLessThans - Return the number of times a backedge containing the
6079 /// specified less-than comparison will execute. If not computable, return
6080 /// CouldNotCompute.
6081 ScalarEvolution::ExitLimit
6082 ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
6083 const Loop *L, bool isSigned) {
6084 // Only handle: "ADDREC < LoopInvariant".
6085 if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();
6086
6087 const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
6088 if (!AddRec || AddRec->getLoop() != L)
6089 return getCouldNotCompute();
6090
6091 // Check to see if we have a flag which makes analysis easy.
6092 bool NoWrap = isSigned ? AddRec->getNoWrapFlags(SCEV::FlagNSW) :
6093 AddRec->getNoWrapFlags(SCEV::FlagNUW);
6094
6095 if (AddRec->isAffine()) {
6096 unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
6097 const SCEV *Step = AddRec->getStepRecurrence(*this);
6098
6099 if (Step->isZero())
6100 return getCouldNotCompute();
6101 if (Step->isOne()) {
6102 // With unit stride, the iteration never steps past the limit value.
6103 } else if (isKnownPositive(Step)) {
6104 // Test whether a positive iteration can step past the limit
6105 // value and past the maximum value for its type in a single step.
6106 // Note that it's not sufficient to check NoWrap here, because even
6107 // though the value after a wrap is undefined, it's not undefined
6108 // behavior, so if wrap does occur, the loop could either terminate or
6109 // loop infinitely, but in either case, the loop is guaranteed to
6110 // iterate at least until the iteration where the wrapping occurs.
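      // For example (illustrative): with an i8 unsigned IV, Step = 10 and a
      // limit that may be as large as 250, a value of 248 still satisfies
      // "248 u< 250" but the next value wraps to 2, so the iteration could
      // step past the limit and the count computed below would be wrong.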
6111 const SCEV *One = getConstant(Step->getType(), 1);
6112 if (isSigned) {
6113 APInt Max = APInt::getSignedMaxValue(BitWidth);
6114 if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
6115 .slt(getSignedRange(RHS).getSignedMax()))
6116 return getCouldNotCompute();
6117 } else {
6118 APInt Max = APInt::getMaxValue(BitWidth);
6119 if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
6120 .ult(getUnsignedRange(RHS).getUnsignedMax()))
6121 return getCouldNotCompute();
6122 }
6123 } else
6124 // TODO: Handle negative strides here and below.
6125 return getCouldNotCompute();
6126
6127 // We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
6128 // m. So, we count the number of iterations in which {n,+,s} < m is true.
6129 // Note that we cannot simply return max(m-n,0)/s because it's not safe to
6130 // treat m-n as signed nor unsigned due to overflow possibility.
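    // For example (illustrative): with i8 values, n = 250 and m = 10 make
    // m - n wrap to 16, so naively dividing by s would yield a bogus count
    // even though the loop body never executes.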
6131
6132 // First, we get the value of the LHS in the first iteration: n
6133 const SCEV *Start = AddRec->getOperand(0);
6134
6135 // Determine the minimum constant start value.
6136 const SCEV *MinStart = getConstant(isSigned ?
6137 getSignedRange(Start).getSignedMin() :
6138 getUnsignedRange(Start).getUnsignedMin());
6139
6140 // If we know that the condition is true in order to enter the loop,
6141 // then we know that it will run exactly (m-n)/s times. Otherwise, we
6142 // only know that it will execute (max(m,n)-n)/s times. In both cases,
6143 // the division must round up.
6144 const SCEV *End = RHS;
6145 if (!isLoopEntryGuardedByCond(L,
6146 isSigned ? ICmpInst::ICMP_SLT :
6147 ICmpInst::ICMP_ULT,
6148 getMinusSCEV(Start, Step), RHS))
6149 End = isSigned ? getSMaxExpr(RHS, Start)
6150 : getUMaxExpr(RHS, Start);
6151
6152 // Determine the maximum constant end value.
6153 const SCEV *MaxEnd = getConstant(isSigned ?
6154 getSignedRange(End).getSignedMax() :
6155 getUnsignedRange(End).getUnsignedMax());
6156
6157 // If MaxEnd is within a step of the maximum integer value in its type,
6158 // adjust it down to the minimum value which would produce the same effect.
6159 // This allows the subsequent ceiling division of (N+(step-1))/step to
6160 // compute the correct value.
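    // For example (illustrative): with an i8 unsigned IV and Step = 4, a
    // MaxEnd of 255 is clamped to 255 - (4 - 1) = 252, so the rounding
    // adjustment added in getBECount cannot overflow.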
6161 const SCEV *StepMinusOne = getMinusSCEV(Step,
6162 getConstant(Step->getType(), 1));
6163 MaxEnd = isSigned ?
6164 getSMinExpr(MaxEnd,
6165 getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
6166 StepMinusOne)) :
6167 getUMinExpr(MaxEnd,
6168 getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
6169 StepMinusOne));
6170
6171 // Finally, we subtract these two values and divide, rounding up, to get
6172 // the number of times the backedge is executed.
6173 const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
6174
6175 // The maximum backedge count is similar, except using the minimum start
6176 // value and the maximum end value.
6177 // If we already have an exact constant BECount, use it instead.
6178 const SCEV *MaxBECount = isa<SCEVConstant>(BECount) ? BECount
6179 : getBECount(MinStart, MaxEnd, Step, NoWrap);
6180
6181 // If the stride is nonconstant, and NoWrap == true, then
6182 // getBECount(MinStart, MaxEnd) may not compute. This would result in an
6183 // exact BECount and invalid MaxBECount, which should be avoided to catch
6184 // more optimization opportunities.
6185 if (isa<SCEVCouldNotCompute>(MaxBECount))
6186 MaxBECount = BECount;
6187
6188 return ExitLimit(BECount, MaxBECount);
6189 }
6190
6191 return getCouldNotCompute();
6192 }
6193
6194 /// getNumIterationsInRange - Return the number of iterations of this loop that
6195 /// produce values in the specified constant range. Another way of looking at
6196 /// this is that it returns the first iteration number where the value is not in
6197 /// the range, thus computing the exit count. If the iteration count can't
6198 /// be computed, an instance of SCEVCouldNotCompute is returned.
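/// For example (illustrative), {0,+,2} with Range = [0,10) takes the values
/// 0, 2, 4, 6, 8 inside the range and first leaves it at iteration 5 (value
/// 10), so the result is 5.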
6199 const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
6200 ScalarEvolution &SE) const {
6201 if (Range.isFullSet()) // Infinite loop.
6202 return SE.getCouldNotCompute();
6203
6204 // If the start is a non-zero constant, shift the range to simplify things.
6205 if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
6206 if (!SC->getValue()->isZero()) {
6207 SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
6208 Operands[0] = SE.getConstant(SC->getType(), 0);
6209 const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
6210 getNoWrapFlags(FlagNW));
6211 if (const SCEVAddRecExpr *ShiftedAddRec =
6212 dyn_cast<SCEVAddRecExpr>(Shifted))
6213 return ShiftedAddRec->getNumIterationsInRange(
6214 Range.subtract(SC->getValue()->getValue()), SE);
6215 // This is strange and shouldn't happen.
6216 return SE.getCouldNotCompute();
6217 }
6218
6219 // The only time we can solve this is when we have all constant indices.
6220 // Otherwise, we cannot determine the overflow conditions.
6221 for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
6222 if (!isa<SCEVConstant>(getOperand(i)))
6223 return SE.getCouldNotCompute();
6224
6225
6226 // Okay at this point we know that all elements of the chrec are constants and
6227 // that the start element is zero.
6228
6229 // First check to see if the range contains zero. If not, the first
6230 // iteration exits.
6231 unsigned BitWidth = SE.getTypeSizeInBits(getType());
6232 if (!Range.contains(APInt(BitWidth, 0)))
6233 return SE.getConstant(getType(), 0);
6234
6235 if (isAffine()) {
6236 // If this is an affine expression then we have this situation:
6237 // Solve {0,+,A} in Range === Ax in Range
6238
6239 // We know that zero is in the range. If A is positive then we know that
6240 // the upper value of the range must be the first possible exit value.
6241 // If A is negative then the lower of the range is the last possible loop
6242 // value. Also note that we already checked for a full range.
6243 APInt One(BitWidth,1);
6244 APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
6245 APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();
6246
6247 // The exit value should be (End+A)/A.
6248 APInt ExitVal = (End + A).udiv(A);
6249 ConstantInt *ExitValue = ConstantInt::get(SE.getContext(), ExitVal);
6250
6251 // Evaluate at the exit value. If we really did fall out of the valid
6252 // range, then we computed our trip count, otherwise wrap around or other
6253 // things must have happened.
6254 ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
6255 if (Range.contains(Val->getValue()))
6256 return SE.getCouldNotCompute(); // Something strange happened
6257
6258 // Ensure that the previous value is in the range. This is a sanity check.
6259 assert(Range.contains(
6260 EvaluateConstantChrecAtConstant(this,
6261 ConstantInt::get(SE.getContext(), ExitVal - One), SE)->getValue()) &&
6262 "Linear scev computation is off in a bad way!");
6263 return SE.getConstant(ExitValue);
6264 } else if (isQuadratic()) {
6265 // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of the
6266 // quadratic equation to solve it. To do this, we must frame our problem in
6267 // terms of figuring out when zero is crossed, instead of when
6268 // Range.getUpper() is crossed.
6269 SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
6270 NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
6271 const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
6272 // getNoWrapFlags(FlagNW)
6273 FlagAnyWrap);
6274
6275 // Next, solve the constructed addrec
6276 std::pair<const SCEV *,const SCEV *> Roots =
6277 SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
6278 const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
6279 const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
6280 if (R1) {
6281 // Pick the smallest positive root value.
6282 if (ConstantInt *CB =
6283 dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
6284 R1->getValue(), R2->getValue()))) {
6285 if (!CB->getZExtValue())
6286 std::swap(R1, R2); // R1 is the minimum root now.
6287
6288 // Make sure the root is not off by one. The returned iteration should
6289 // not be in the range, but the previous one should be. When solving
6290 // for "X*X < 5", for example, we should not return a root of 2.
6291 ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
6292 R1->getValue(),
6293 SE);
6294 if (Range.contains(R1Val->getValue())) {
6295 // The next iteration must be out of the range...
6296 ConstantInt *NextVal =
6297 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()+1);
6298
6299 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
6300 if (!Range.contains(R1Val->getValue()))
6301 return SE.getConstant(NextVal);
6302 return SE.getCouldNotCompute(); // Something strange happened
6303 }
6304
6305 // If R1 was not in the range, then it is a good return value. Make
6306 // sure that R1-1 WAS in the range though, just in case.
6307 ConstantInt *NextVal =
6308 ConstantInt::get(SE.getContext(), R1->getValue()->getValue()-1);
6309 R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
6310 if (Range.contains(R1Val->getValue()))
6311 return R1;
6312 return SE.getCouldNotCompute(); // Something strange happened
6313 }
6314 }
6315 }
6316
6317 return SE.getCouldNotCompute();
6318 }
6319
6320
6321
6322 //===----------------------------------------------------------------------===//
6323 // SCEVCallbackVH Class Implementation
6324 //===----------------------------------------------------------------------===//
6325
6326 void ScalarEvolution::SCEVCallbackVH::deleted() {
6327 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
6328 if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
6329 SE->ConstantEvolutionLoopExitValue.erase(PN);
6330 SE->ValueExprMap.erase(getValPtr());
6331 // this now dangles!
6332 }
6333
6334 void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *V) {
6335 assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
6336
6337 // Forget all the expressions associated with users of the old value,
6338 // so that future queries will recompute the expressions using the new
6339 // value.
6340 Value *Old = getValPtr();
6341 SmallVector<User *, 16> Worklist;
6342 SmallPtrSet<User *, 8> Visited;
6343 for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
6344 UI != UE; ++UI)
6345 Worklist.push_back(*UI);
6346 while (!Worklist.empty()) {
6347 User *U = Worklist.pop_back_val();
6348 // Deleting the Old value will cause this to dangle. Postpone
6349 // that until everything else is done.
6350 if (U == Old)
6351 continue;
6352 if (!Visited.insert(U))
6353 continue;
6354 if (PHINode *PN = dyn_cast<PHINode>(U))
6355 SE->ConstantEvolutionLoopExitValue.erase(PN);
6356 SE->ValueExprMap.erase(U);
6357 for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
6358 UI != UE; ++UI)
6359 Worklist.push_back(*UI);
6360 }
6361 // Delete the Old value.
6362 if (PHINode *PN = dyn_cast<PHINode>(Old))
6363 SE->ConstantEvolutionLoopExitValue.erase(PN);
6364 SE->ValueExprMap.erase(Old);
6365 // this now dangles!
6366 }
6367
6368 ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
6369 : CallbackVH(V), SE(se) {}
6370
6371 //===----------------------------------------------------------------------===//
6372 // ScalarEvolution Class Implementation
6373 //===----------------------------------------------------------------------===//
6374
6375 ScalarEvolution::ScalarEvolution()
6376 : FunctionPass(ID), FirstUnknown(0) {
6377 initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
6378 }
6379
6380 bool ScalarEvolution::runOnFunction(Function &F) {
6381 this->F = &F;
6382 LI = &getAnalysis<LoopInfo>();
6383 TD = getAnalysisIfAvailable<TargetData>();
6384 DT = &getAnalysis<DominatorTree>();
6385 return false;
6386 }
6387
6388 void ScalarEvolution::releaseMemory() {
6389 // Iterate through all the SCEVUnknown instances and call their
6390 // destructors, so that they release their references to their values.
6391 for (SCEVUnknown *U = FirstUnknown; U; U = U->Next)
6392 U->~SCEVUnknown();
6393 FirstUnknown = 0;
6394
6395 ValueExprMap.clear();
6396
6397 // Free any extra memory created for ExitNotTakenInfo in the unlikely event
6398 // that a loop had multiple computable exits.
6399 for (DenseMap<const Loop*, BackedgeTakenInfo>::iterator I =
6400 BackedgeTakenCounts.begin(), E = BackedgeTakenCounts.end();
6401 I != E; ++I) {
6402 I->second.clear();
6403 }
6404
6405 BackedgeTakenCounts.clear();
6406 ConstantEvolutionLoopExitValue.clear();
6407 ValuesAtScopes.clear();
6408 LoopDispositions.clear();
6409 BlockDispositions.clear();
6410 UnsignedRanges.clear();
6411 SignedRanges.clear();
6412 UniqueSCEVs.clear();
6413 SCEVAllocator.Reset();
6414 }
6415
6416 void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
6417 AU.setPreservesAll();
6418 AU.addRequiredTransitive<LoopInfo>();
6419 AU.addRequiredTransitive<DominatorTree>();
6420 }
6421
6422 bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
6423 return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
6424 }
6425
6426 static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
6427 const Loop *L) {
6428 // Print all inner loops first
6429 for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
6430 PrintLoopInfo(OS, SE, *I);
6431
6432 OS << "Loop ";
6433 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
6434 OS << ": ";
6435
6436 SmallVector<BasicBlock *, 8> ExitBlocks;
6437 L->getExitBlocks(ExitBlocks);
6438 if (ExitBlocks.size() != 1)
6439 OS << "<multiple exits> ";
6440
6441 if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
6442 OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
6443 } else {
6444 OS << "Unpredictable backedge-taken count. ";
6445 }
6446
6447 OS << "\n"
6448 "Loop ";
6449 WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
6450 OS << ": ";
6451
6452 if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
6453 OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
6454 } else {
6455 OS << "Unpredictable max backedge-taken count. ";
6456 }
6457
6458 OS << "\n";
6459 }
6460
6461 void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
6462 // ScalarEvolution's implementation of the print method is to print
6463 // out SCEV values of all instructions that are interesting. Doing
6464 // this potentially causes it to create new SCEV objects though,
6465 // which technically conflicts with the const qualifier. This isn't
6466 // observable from outside the class though, so casting away the
6467 // const isn't dangerous.
6468 ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
6469
6470 OS << "Classifying expressions for: ";
6471 WriteAsOperand(OS, F, /*PrintType=*/false);
6472 OS << "\n";
6473 for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
6474 if (isSCEVable(I->getType()) && !isa<CmpInst>(*I)) {
6475 OS << *I << '\n';
6476 OS << " --> ";
6477 const SCEV *SV = SE.getSCEV(&*I);
6478 SV->print(OS);
6479
6480 const Loop *L = LI->getLoopFor((*I).getParent());
6481
6482 const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
6483 if (AtUse != SV) {
6484 OS << " --> ";
6485 AtUse->print(OS);
6486 }
6487
6488 if (L) {
6489 OS << "\t\t" "Exits: ";
6490 const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
6491 if (!SE.isLoopInvariant(ExitValue, L)) {
6492 OS << "<<Unknown>>";
6493 } else {
6494 OS << *ExitValue;
6495 }
6496 }
6497
6498 OS << "\n";
6499 }
6500
6501 OS << "Determining loop execution counts for: ";
6502 WriteAsOperand(OS, F, /*PrintType=*/false);
6503 OS << "\n";
6504 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
6505 PrintLoopInfo(OS, &SE, *I);
6506 }
6507
6508 ScalarEvolution::LoopDisposition
6509 ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
6510 std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
6511 std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
6512 Values.insert(std::make_pair(L, LoopVariant));
6513 if (!Pair.second)
6514 return Pair.first->second;
6515
6516 LoopDisposition D = computeLoopDisposition(S, L);
6517 return LoopDispositions[S][L] = D;
6518 }
6519
6520 ScalarEvolution::LoopDisposition
6521 ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
6522 switch (S->getSCEVType()) {
6523 case scConstant:
6524 return LoopInvariant;
6525 case scTruncate:
6526 case scZeroExtend:
6527 case scSignExtend:
6528 return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
6529 case scAddRecExpr: {
6530 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
6531
6532 // If L is the addrec's loop, it's computable.
6533 if (AR->getLoop() == L)
6534 return LoopComputable;
6535
6536 // Add recurrences are never invariant in the function-body (null loop).
6537 if (!L)
6538 return LoopVariant;
6539
6540 // This recurrence is variant w.r.t. L if L contains AR's loop.
6541 if (L->contains(AR->getLoop()))
6542 return LoopVariant;
6543
6544 // This recurrence is invariant w.r.t. L if AR's loop contains L.
6545 if (AR->getLoop()->contains(L))
6546 return LoopInvariant;
6547
6548 // This recurrence is variant w.r.t. L if any of its operands
6549 // are variant.
6550 for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
6551 I != E; ++I)
6552 if (!isLoopInvariant(*I, L))
6553 return LoopVariant;
6554
6555 // Otherwise it's loop-invariant.
6556 return LoopInvariant;
6557 }
6558 case scAddExpr:
6559 case scMulExpr:
6560 case scUMaxExpr:
6561 case scSMaxExpr: {
6562 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
6563 bool HasVarying = false;
6564 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
6565 I != E; ++I) {
6566 LoopDisposition D = getLoopDisposition(*I, L);
6567 if (D == LoopVariant)
6568 return LoopVariant;
6569 if (D == LoopComputable)
6570 HasVarying = true;
6571 }
6572 return HasVarying ? LoopComputable : LoopInvariant;
6573 }
6574 case scUDivExpr: {
6575 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
6576 LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
6577 if (LD == LoopVariant)
6578 return LoopVariant;
6579 LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
6580 if (RD == LoopVariant)
6581 return LoopVariant;
6582 return (LD == LoopInvariant && RD == LoopInvariant) ?
6583 LoopInvariant : LoopComputable;
6584 }
6585 case scUnknown:
6586 // All non-instruction values are loop invariant. All instructions are loop
6587 // invariant if they are not contained in the specified loop.
6588 // Instructions are never considered invariant in the function body
6589 // (null loop) because they are defined within the "loop".
6590 if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
6591 return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
6592 return LoopInvariant;
6593 case scCouldNotCompute:
6594 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
6595 return LoopVariant;
6596 default: break;
6597 }
6598 llvm_unreachable("Unknown SCEV kind!");
6599 return LoopVariant;
6600 }
6601
6602 bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
6603 return getLoopDisposition(S, L) == LoopInvariant;
6604 }
6605
6606 bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
6607 return getLoopDisposition(S, L) == LoopComputable;
6608 }
6609
6610 ScalarEvolution::BlockDisposition
6611 ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
6612 std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
6613 std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
6614 Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
6615 if (!Pair.second)
6616 return Pair.first->second;
6617
6618 BlockDisposition D = computeBlockDisposition(S, BB);
6619 return BlockDispositions[S][BB] = D;
6620 }
6621
6622 ScalarEvolution::BlockDisposition
6623 ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
6624 switch (S->getSCEVType()) {
6625 case scConstant:
6626 return ProperlyDominatesBlock;
6627 case scTruncate:
6628 case scZeroExtend:
6629 case scSignExtend:
6630 return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
6631 case scAddRecExpr: {
6632 // This uses a "dominates" query instead of "properly dominates" query
6633 // to test for proper dominance too, because the instruction which
6634 // produces the addrec's value is a PHI, and a PHI effectively properly
6635 // dominates its entire containing block.
6636 const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
6637 if (!DT->dominates(AR->getLoop()->getHeader(), BB))
6638 return DoesNotDominateBlock;
6639 }
6640 // FALL THROUGH into SCEVNAryExpr handling.
6641 case scAddExpr:
6642 case scMulExpr:
6643 case scUMaxExpr:
6644 case scSMaxExpr: {
6645 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
6646 bool Proper = true;
6647 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
6648 I != E; ++I) {
6649 BlockDisposition D = getBlockDisposition(*I, BB);
6650 if (D == DoesNotDominateBlock)
6651 return DoesNotDominateBlock;
6652 if (D == DominatesBlock)
6653 Proper = false;
6654 }
6655 return Proper ? ProperlyDominatesBlock : DominatesBlock;
6656 }
6657 case scUDivExpr: {
6658 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
6659 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
6660 BlockDisposition LD = getBlockDisposition(LHS, BB);
6661 if (LD == DoesNotDominateBlock)
6662 return DoesNotDominateBlock;
6663 BlockDisposition RD = getBlockDisposition(RHS, BB);
6664 if (RD == DoesNotDominateBlock)
6665 return DoesNotDominateBlock;
6666 return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
6667 ProperlyDominatesBlock : DominatesBlock;
6668 }
6669 case scUnknown:
6670 if (Instruction *I =
6671 dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
6672 if (I->getParent() == BB)
6673 return DominatesBlock;
6674 if (DT->properlyDominates(I->getParent(), BB))
6675 return ProperlyDominatesBlock;
6676 return DoesNotDominateBlock;
6677 }
6678 return ProperlyDominatesBlock;
6679 case scCouldNotCompute:
6680 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
6681 return DoesNotDominateBlock;
6682 default: break;
6683 }
6684 llvm_unreachable("Unknown SCEV kind!");
6685 return DoesNotDominateBlock;
6686 }
6687
6688 bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
6689 return getBlockDisposition(S, BB) >= DominatesBlock;
6690 }
6691
6692 bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
6693 return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
6694 }
6695
6696 bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
6697 switch (S->getSCEVType()) {
6698 case scConstant:
6699 return false;
6700 case scTruncate:
6701 case scZeroExtend:
6702 case scSignExtend: {
6703 const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
6704 const SCEV *CastOp = Cast->getOperand();
6705 return Op == CastOp || hasOperand(CastOp, Op);
6706 }
6707 case scAddRecExpr:
6708 case scAddExpr:
6709 case scMulExpr:
6710 case scUMaxExpr:
6711 case scSMaxExpr: {
6712 const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
6713 for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
6714 I != E; ++I) {
6715 const SCEV *NAryOp = *I;
6716 if (NAryOp == Op || hasOperand(NAryOp, Op))
6717 return true;
6718 }
6719 return false;
6720 }
6721 case scUDivExpr: {
6722 const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
6723 const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
6724 return LHS == Op || hasOperand(LHS, Op) ||
6725 RHS == Op || hasOperand(RHS, Op);
6726 }
6727 case scUnknown:
6728 return false;
6729 case scCouldNotCompute:
6730 llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
6731 return false;
6732 default: break;
6733 }
6734 llvm_unreachable("Unknown SCEV kind!");
6735 return false;
6736 }
6737
6738 void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
6739 ValuesAtScopes.erase(S);
6740 LoopDispositions.erase(S);
6741 BlockDispositions.erase(S);
6742 UnsignedRanges.erase(S);
6743 SignedRanges.erase(S);
6744 }
6745