//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches. This will be corrected in the future.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "loop-unroll"
#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");

/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
                                    ValueToValueMapTy &VMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);
    ValueToValueMapTy::iterator It = VMap.find(Op);
    if (It != VMap.end())
      I->setOperand(op, It->second);
  }

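  // PHI nodes keep their incoming blocks separately from their value
  // operands, so the loop above does not update them; remap the incoming
  // blocks explicitly.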
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
      if (It != VMap.end())
        PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
    }
  }
}

/// FoldBlockIntoPredecessor - Folds a basic block into its predecessor if it
/// only has one predecessor, and that predecessor only has one successor.
/// The LoopInfo Analysis that is passed will be kept consistent.
/// Returns the new combined block.
static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo *LI,
                                            LPPassManager *LPM) {
  // Merge basic blocks into their predecessor if there is only one distinct
  // pred, and if there is only one distinct successor of the predecessor, and
  // if there are no PHI nodes.
  BasicBlock *OnlyPred = BB->getSinglePredecessor();
  if (!OnlyPred) return 0;

  if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
    return 0;

  DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);

  // Resolve any PHI nodes at the start of the block. They are all
  // guaranteed to have exactly one entry if they exist, unless there are
  // multiple duplicate (but guaranteed to be equal) entries for the
  // incoming edges. This occurs when there are multiple edges from
  // OnlyPred to OnlySucc.
  FoldSingleEntryPHINodes(BB);

  // Delete the unconditional branch from the predecessor...
  OnlyPred->getInstList().pop_back();

  // Make all PHI nodes that referred to BB now refer to Pred as their
  // source...
  BB->replaceAllUsesWith(OnlyPred);

  // Move all definitions in the successor to the predecessor...
  OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());

  std::string OldName = BB->getName();

  // Erase basic block from the function...

  // ScalarEvolution holds references to loop exit blocks.
  if (LPM) {
    if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
      if (Loop *L = LI->getLoopFor(BB))
        SE->forgetLoop(L);
    }
  }
  LI->removeBlock(BB);
  BB->eraseFromParent();

  // Give the predecessor the erased block's name if it has none of its own.
  if (!OldName.empty() && !OnlyPred->hasName())
    OnlyPred->setName(OldName);

  return OnlyPred;
}

/// Unroll the given loop by Count. The loop must be in LCSSA form. Returns true
/// if unrolling was successful, or false if the loop was unmodified. Unrolling
/// can only fail when the loop's latch block is not terminated by a conditional
/// branch instruction. However, if the trip count (and multiple) are not known,
/// loop unrolling will mostly produce more code that is no faster.
///
/// TripCount is generally defined as the number of times the loop header
/// executes. UnrollLoop relaxes the definition to permit early exits: here
/// TripCount is the iteration on which control exits LatchBlock if no early
/// exits were taken. Note that UnrollLoop assumes that the loop counter test
/// terminates LatchBlock in order to remove unnecessary instances of the
/// test. In other words, control may exit the loop prior to TripCount
/// iterations via an early branch, but control may not exit the loop from the
/// LatchBlock's terminator prior to TripCount iterations.
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// If a LoopPassManager is passed in, and the loop is fully removed, it will be
/// removed from the LoopPassManager as well. LPM can also be NULL.
///
/// This utility preserves LoopInfo. If DominatorTree or ScalarEvolution are
/// available, it must also preserve those analyses.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
                      bool AllowRuntime, unsigned TripMultiple,
                      LoopInfo *LI, LPPassManager *LPM) {
  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
    return false;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
    return false;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
    return false;
  }

  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  if (!BI || BI->isUnconditional()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          " Can't unroll; loop not terminated by a conditional branch.\n");
    return false;
  }

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          " Won't unroll loop: address of header block is taken.\n");
    return false;
  }

  if (TripCount != 0)
    DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
  if (TripMultiple != 1)
    DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;

  // Don't enter the unroll code if there is nothing to do. This way we don't
  // need to support "partial unrolling by 1".
  if (TripCount == 0 && Count < 2)
    return false;

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = Count == TripCount;

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);

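  // Emit the prolog that executes the leftover (trip count modulo Count)
  // iterations before entering the unrolled body; if it cannot be generated,
  // run-time unrolling is not possible, so give up.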
  if (RuntimeTripCount && !UnrollRuntimeLoopProlog(L, Count, LI, LPM))
    return false;

  // Notify ScalarEvolution that the loop will be substantially changed,
  // if not outright eliminated.
  if (LPM) {
    ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
    if (SE)
      SE->forgetLoop(L);
  }

  // If we know the trip count, we know the multiple...
  unsigned BreakoutTrip = 0;
  if (TripCount != 0) {
    BreakoutTrip = TripCount % Count;
    TripMultiple = 0;
  } else {
    // Figure out what multiple to use.
    BreakoutTrip = TripMultiple =
      (unsigned)GreatestCommonDivisor64(Count, TripMultiple);
  }
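  // From here on, TripMultiple == 0 signals that the exact trip count is
  // known. When connecting the copies below, only branches whose successor
  // index equals BreakoutTrip (or, for an unknown trip count, is a multiple
  // of TripMultiple) need to remain conditional.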

  if (CompletelyUnroll) {
    DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
          << " with trip count " << TripCount << "!\n");
  } else {
    DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
          << " by " << Count);
    if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
      DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
    } else if (TripMultiple != 1) {
      DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
    } else if (RuntimeTripCount) {
      DEBUG(dbgs() << " with run-time trip count");
    }
    DEBUG(dbgs() << "!\n");
  }

  std::vector<BasicBlock*> LoopBlocks = L->getBlocks();

  bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
  BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);

  // For the first iteration of the loop, we should use the precloned values
  // for PHI nodes. Insert associations now.
  ValueToValueMapTy LastValueMap;
  std::vector<PHINode*> OrigPHINode;
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    OrigPHINode.push_back(cast<PHINode>(I));
  }

  std::vector<BasicBlock*> Headers;
  std::vector<BasicBlock*> Latches;
  Headers.push_back(Header);
  Latches.push_back(LatchBlock);

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);

  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

  for (unsigned It = 1; It != Count; ++It) {
    std::vector<BasicBlock*> NewBlocks;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      // Loop over all of the PHI nodes in the block, changing them to use the
      // incoming values from the previous block.
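      // The cloned header has only one predecessor, so each of its PHIs
      // collapses to the value that flowed around the backedge; record that
      // value in VMap so later uses are remapped to it, and drop the clone.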
      if (*BB == Header)
        for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
          PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
          Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
          if (Instruction *InValI = dyn_cast<Instruction>(InVal))
            if (It > 1 && L->contains(InValI))
              InVal = LastValueMap[InValI];
          VMap[OrigPHINode[i]] = InVal;
          New->getInstList().erase(NewPHI);
        }

      // Update our running map of newest clones
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI)
        LastValueMap[VI->first] = VI->second;

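      // Register the clone with LoopInfo so the analysis stays consistent.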
      L->addBasicBlockToLoop(New, LI->getBase());

      // Add phi entries for newly created values to all exit blocks.
      for (succ_iterator SI = succ_begin(*BB), SE = succ_end(*BB);
           SI != SE; ++SI) {
        if (L->contains(*SI))
          continue;
        for (BasicBlock::iterator BBI = (*SI)->begin();
             PHINode *phi = dyn_cast<PHINode>(BBI); ++BBI) {
          Value *Incoming = phi->getIncomingValueForBlock(*BB);
          ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
          if (It != LastValueMap.end())
            Incoming = It->second;
          phi->addIncoming(Incoming, New);
        }
      }
      // Keep track of new headers and latches as we create them, so that
      // we can insert the proper branches later.
      if (*BB == Header)
        Headers.push_back(New);
      if (*BB == LatchBlock)
        Latches.push_back(New);

      NewBlocks.push_back(New);
    }

    // Remap all instructions in the most recent iteration
    for (unsigned i = 0; i < NewBlocks.size(); ++i)
      for (BasicBlock::iterator I = NewBlocks[i]->begin(),
           E = NewBlocks[i]->end(); I != E; ++I)
        ::RemapInstruction(I, LastValueMap);
  }

  // Loop over the PHI nodes in the original block, setting incoming values.
  for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
    PHINode *PN = OrigPHINode[i];
    if (CompletelyUnroll) {
      PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
      Header->getInstList().erase(PN);
    }
    else if (Count > 1) {
      Value *InVal = PN->removeIncomingValue(LatchBlock, false);
      // If this value was defined in the loop, take the value defined by the
      // last iteration of the loop.
      if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
        if (L->contains(InValI))
          InVal = LastValueMap[InVal];
      }
      assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
      PN->addIncoming(InVal, Latches.back());
    }
  }

  // Now that all the basic blocks for the unrolled iterations are in place,
  // set up the branches to connect them.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    // The original branch was replicated in each unrolled iteration.
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());

    // The branch destination.
    unsigned j = (i + 1) % e;
    BasicBlock *Dest = Headers[j];
    bool NeedConditional = true;

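    // With a run-time trip count, the prolog already handles the leftover
    // iterations, so every latch except the final one can branch
    // unconditionally to the next unrolled copy.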
    if (RuntimeTripCount && j != 0) {
      NeedConditional = false;
    }

    // For a complete unroll, make the last iteration end with a branch
    // to the exit block.
    if (CompletelyUnroll && j == 0) {
      Dest = LoopExit;
      NeedConditional = false;
    }

    // If we know the trip count or a multiple of it, we can safely use an
    // unconditional branch for some iterations.
    if (j != BreakoutTrip && (TripMultiple == 0 || j % TripMultiple != 0)) {
      NeedConditional = false;
    }

    if (NeedConditional) {
      // Update the conditional branch's successor for the following
      // iteration.
      Term->setSuccessor(!ContinueOnTrue, Dest);
    } else {
      // Remove phi operands at this loop exit
      if (Dest != LoopExit) {
        BasicBlock *BB = Latches[i];
        for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
             SI != SE; ++SI) {
          if (*SI == Headers[i])
            continue;
          for (BasicBlock::iterator BBI = (*SI)->begin();
               PHINode *Phi = dyn_cast<PHINode>(BBI); ++BBI) {
            Phi->removeIncomingValue(BB, false);
          }
        }
      }
      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest, Term);
      Term->eraseFromParent();
    }
  }

  // Merge adjacent basic blocks, if possible.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());
    if (Term->isUnconditional()) {
      BasicBlock *Dest = Term->getSuccessor(0);
      if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI, LPM))
        std::replace(Latches.begin(), Latches.end(), Dest, Fold);
    }
  }

  if (LPM) {
    // FIXME: Reconstruct dom info, because it is not preserved properly.
    // Incrementally updating domtree after loop unrolling would be easy.
    if (DominatorTree *DT = LPM->getAnalysisIfAvailable<DominatorTree>())
      DT->runOnFunction(*L->getHeader()->getParent());

    // Simplify any new induction variables in the partially unrolled loop.
    ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>();
    if (SE && !CompletelyUnroll) {
      SmallVector<WeakVH, 16> DeadInsts;
      simplifyLoopIVs(L, SE, LPM, DeadInsts);

      // Aggressively clean up dead instructions that simplifyLoopIVs already
      // identified. Any remaining should be cleaned up below.
      while (!DeadInsts.empty())
        if (Instruction *Inst =
              dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
          RecursivelyDeleteTriviallyDeadInstructions(Inst);
    }
  }

  // At this point, the code is well formed. We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  const std::vector<BasicBlock*> &NewLoopBlocks = L->getBlocks();
  for (std::vector<BasicBlock*>::const_iterator BB = NewLoopBlocks.begin(),
       BBE = NewLoopBlocks.end(); BB != BBE; ++BB)
    for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); I != E; ) {
      Instruction *Inst = I++;

      if (isInstructionTriviallyDead(Inst))
        (*BB)->getInstList().erase(Inst);
      else if (Value *V = SimplifyInstruction(Inst))
        if (LI->replacementPreservesLCSSAForm(Inst, V)) {
          Inst->replaceAllUsesWith(V);
          (*BB)->getInstList().erase(Inst);
        }
    }

  NumCompletelyUnrolled += CompletelyUnroll;
  ++NumUnrolled;
  // Remove the loop from the LoopPassManager if it's completely removed.
  if (CompletelyUnroll && LPM != NULL)
    LPM->deleteLoopFromQueue(L);

  return true;
}