//===-- UnrollLoop.cpp - Loop unrolling utilities -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements some loop unrolling utilities. It does not define any
// actual pass or policy, but provides a single function to perform loop
// unrolling.
//
// The process of unrolling can produce extraneous basic blocks linked with
// unconditional branches. This will be corrected in the future.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/UnrollLoop.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SimplifyIndVar.h"
using namespace llvm;

#define DEBUG_TYPE "loop-unroll"

// TODO: Should these be here or in LoopUnroll?
STATISTIC(NumCompletelyUnrolled, "Number of loops completely unrolled");
STATISTIC(NumUnrolled, "Number of loops unrolled (completely or otherwise)");

/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by VMap.
static inline void RemapInstruction(Instruction *I,
                                    ValueToValueMapTy &VMap) {
  for (unsigned op = 0, E = I->getNumOperands(); op != E; ++op) {
    Value *Op = I->getOperand(op);
    ValueToValueMapTy::iterator It = VMap.find(Op);
    if (It != VMap.end())
      I->setOperand(op, It->second);
  }

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      ValueToValueMapTy::iterator It = VMap.find(PN->getIncomingBlock(i));
      if (It != VMap.end())
        PN->setIncomingBlock(i, cast<BasicBlock>(It->second));
    }
  }
}

/// FoldBlockIntoPredecessor - Folds a basic block into its predecessor if it
/// only has one predecessor, and that predecessor only has one successor.
/// The LoopInfo Analysis that is passed will be kept consistent.
/// Returns the new combined block.
static BasicBlock *FoldBlockIntoPredecessor(BasicBlock *BB, LoopInfo *LI,
                                            LPPassManager *LPM) {
  // Merge basic blocks into their predecessor if there is only one distinct
  // pred, and if there is only one distinct successor of the predecessor, and
  // if there are no PHI nodes.
  BasicBlock *OnlyPred = BB->getSinglePredecessor();
  if (!OnlyPred) return nullptr;

  if (OnlyPred->getTerminator()->getNumSuccessors() != 1)
    return nullptr;

  DEBUG(dbgs() << "Merging: " << *BB << "into: " << *OnlyPred);

  // Resolve any PHI nodes at the start of the block. They are all
  // guaranteed to have exactly one entry if they exist, unless there are
  // multiple duplicate (but guaranteed to be equal) entries for the
  // incoming edges. This occurs when there are multiple edges from
  // OnlyPred to OnlySucc.
  FoldSingleEntryPHINodes(BB);

  // Delete the unconditional branch from the predecessor...
  OnlyPred->getInstList().pop_back();

  // Make all PHI nodes that referred to BB now refer to Pred as their
  // source...
  BB->replaceAllUsesWith(OnlyPred);

  // Move all definitions in the successor to the predecessor...
  OnlyPred->getInstList().splice(OnlyPred->end(), BB->getInstList());

  // OldName will be valid until erased.
  StringRef OldName = BB->getName();

  // Erase basic block from the function...

  // ScalarEvolution holds references to loop exit blocks.
  if (LPM) {
    if (ScalarEvolution *SE = LPM->getAnalysisIfAvailable<ScalarEvolution>()) {
      if (Loop *L = LI->getLoopFor(BB))
        SE->forgetLoop(L);
    }
  }
  LI->removeBlock(BB);

  // Inherit predecessor's name if it exists...
  if (!OldName.empty() && !OnlyPred->hasName())
    OnlyPred->setName(OldName);

  BB->eraseFromParent();

  return OnlyPred;
}

/// Unroll the given loop by Count. The loop must be in LCSSA form. Returns true
/// if unrolling was successful, or false if the loop was unmodified. Unrolling
/// can only fail when the loop's latch block is not terminated by a conditional
/// branch instruction. However, if the trip count (and multiple) are not known,
/// loop unrolling will mostly produce more code that is no faster.
///
/// TripCount is generally defined as the number of times the loop header
/// executes. UnrollLoop relaxes the definition to permit early exits: here
/// TripCount is the iteration on which control exits LatchBlock if no early
/// exits were taken. Note that UnrollLoop assumes that the loop counter test
/// terminates LatchBlock in order to remove unnecessary instances of the
/// test. In other words, control may exit the loop prior to TripCount
/// iterations via an early branch, but control may not exit the loop from the
/// LatchBlock's terminator prior to TripCount iterations.
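///
/// For example, with TripCount = 10 and Count = 4, the unrolled body contains
/// four copies of the original iteration, and only the copy on which the tenth
/// iteration can finish keeps the latch's exit test; the other copies branch
/// to the next copy unconditionally.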
///
/// Similarly, TripMultiple divides the number of times that the LatchBlock may
/// execute without exiting the loop.
///
/// The LoopInfo Analysis that is passed will be kept consistent.
///
/// If a LoopPassManager is passed in, and the loop is fully removed, it will be
/// removed from the LoopPassManager as well. LPM can also be NULL.
///
/// This utility preserves LoopInfo. If DominatorTree or ScalarEvolution are
/// available from the Pass it must also preserve those analyses.
bool llvm::UnrollLoop(Loop *L, unsigned Count, unsigned TripCount,
                      bool AllowRuntime, unsigned TripMultiple,
                      LoopInfo *LI, Pass *PP, LPPassManager *LPM) {
  BasicBlock *Preheader = L->getLoopPreheader();
  if (!Preheader) {
    DEBUG(dbgs() << " Can't unroll; loop preheader-insertion failed.\n");
    return false;
  }

  BasicBlock *LatchBlock = L->getLoopLatch();
  if (!LatchBlock) {
    DEBUG(dbgs() << " Can't unroll; loop exit-block-insertion failed.\n");
    return false;
  }

  // Loops with indirectbr cannot be cloned.
  if (!L->isSafeToClone()) {
    DEBUG(dbgs() << " Can't unroll; Loop body cannot be cloned.\n");
    return false;
  }

  BasicBlock *Header = L->getHeader();
  BranchInst *BI = dyn_cast<BranchInst>(LatchBlock->getTerminator());

  if (!BI || BI->isUnconditional()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          " Can't unroll; loop not terminated by a conditional branch.\n");
    return false;
  }

  if (Header->hasAddressTaken()) {
    // The loop-rotate pass can be helpful to avoid this in many cases.
    DEBUG(dbgs() <<
          " Won't unroll loop: address of header block is taken.\n");
    return false;
  }

  if (TripCount != 0)
    DEBUG(dbgs() << " Trip Count = " << TripCount << "\n");
  if (TripMultiple != 1)
    DEBUG(dbgs() << " Trip Multiple = " << TripMultiple << "\n");

  // Effectively "DCE" unrolled iterations that are beyond the tripcount
  // and will never be executed.
  if (TripCount != 0 && Count > TripCount)
    Count = TripCount;

  // Don't enter the unroll code if there is nothing to do. This way we don't
  // need to support "partial unrolling by 1".
  if (TripCount == 0 && Count < 2)
    return false;

  assert(Count > 0);
  assert(TripMultiple > 0);
  assert(TripCount == 0 || TripCount % TripMultiple == 0);

  // Are we eliminating the loop control altogether?
  bool CompletelyUnroll = Count == TripCount;

  // We assume a run-time trip count if the compiler cannot
  // figure out the loop trip count and the unroll-runtime
  // flag is specified.
  bool RuntimeTripCount = (TripCount == 0 && Count > 0 && AllowRuntime);

  if (RuntimeTripCount && !UnrollRuntimeLoopProlog(L, Count, LI, LPM))
    return false;

  // Notify ScalarEvolution that the loop will be substantially changed,
  // if not outright eliminated.
  if (PP) {
    ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
    if (SE)
      SE->forgetLoop(L);
  }

  // If we know the trip count, we know the multiple...
  unsigned BreakoutTrip = 0;
  if (TripCount != 0) {
    BreakoutTrip = TripCount % Count;
    TripMultiple = 0;
  } else {
    // Figure out what multiple to use.
    BreakoutTrip = TripMultiple =
      (unsigned)GreatestCommonDivisor64(Count, TripMultiple);
  }
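  // At this point BreakoutTrip identifies which unrolled copy of the latch may
  // still exercise the exit test: with a known trip count it is the single
  // copy at index TripCount % Count; otherwise every copy whose index is a
  // multiple of the reduced TripMultiple keeps its conditional branch (see the
  // branch rewiring below).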

  // Report the unrolling decision.
  DebugLoc LoopLoc = L->getStartLoc();
  Function *F = Header->getParent();
  LLVMContext &Ctx = F->getContext();

  if (CompletelyUnroll) {
    DEBUG(dbgs() << "COMPLETELY UNROLLING loop %" << Header->getName()
                 << " with trip count " << TripCount << "!\n");
    emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
                           Twine("completely unrolled loop with ") +
                               Twine(TripCount) + " iterations");
  } else {
    auto EmitDiag = [&](const Twine &T) {
      emitOptimizationRemark(Ctx, DEBUG_TYPE, *F, LoopLoc,
                             "unrolled loop by a factor of " + Twine(Count) +
                                 T);
    };

    DEBUG(dbgs() << "UNROLLING loop %" << Header->getName()
                 << " by " << Count);
    if (TripMultiple == 0 || BreakoutTrip != TripMultiple) {
      DEBUG(dbgs() << " with a breakout at trip " << BreakoutTrip);
      EmitDiag(" with a breakout at trip " + Twine(BreakoutTrip));
    } else if (TripMultiple != 1) {
      DEBUG(dbgs() << " with " << TripMultiple << " trips per branch");
      EmitDiag(" with " + Twine(TripMultiple) + " trips per branch");
    } else if (RuntimeTripCount) {
      DEBUG(dbgs() << " with run-time trip count");
      EmitDiag(" with run-time trip count");
    }
    DEBUG(dbgs() << "!\n");
  }

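  // ContinueOnTrue records which successor of the latch branch stays inside
  // the loop; the other successor (LoopExit) is the block control reaches when
  // the loop exits through the latch.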
  bool ContinueOnTrue = L->contains(BI->getSuccessor(0));
  BasicBlock *LoopExit = BI->getSuccessor(ContinueOnTrue);

  // For the first iteration of the loop, we should use the precloned values for
  // PHI nodes.  Insert associations now.
  ValueToValueMapTy LastValueMap;
  std::vector<PHINode*> OrigPHINode;
  for (BasicBlock::iterator I = Header->begin(); isa<PHINode>(I); ++I) {
    OrigPHINode.push_back(cast<PHINode>(I));
  }

  std::vector<BasicBlock*> Headers;
  std::vector<BasicBlock*> Latches;
  Headers.push_back(Header);
  Latches.push_back(LatchBlock);

  // The current on-the-fly SSA update requires blocks to be processed in
  // reverse postorder so that LastValueMap contains the correct value at each
  // exit.
  LoopBlocksDFS DFS(L);
  DFS.perform(LI);

  // Stash the DFS iterators before adding blocks to the loop.
  LoopBlocksDFS::RPOIterator BlockBegin = DFS.beginRPO();
  LoopBlocksDFS::RPOIterator BlockEnd = DFS.endRPO();

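  // Clone the loop body Count-1 times; the blocks already in the loop serve as
  // the first (It == 0) copy.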
  for (unsigned It = 1; It != Count; ++It) {
    std::vector<BasicBlock*> NewBlocks;

    for (LoopBlocksDFS::RPOIterator BB = BlockBegin; BB != BlockEnd; ++BB) {
      ValueToValueMapTy VMap;
      BasicBlock *New = CloneBasicBlock(*BB, VMap, "." + Twine(It));
      Header->getParent()->getBasicBlockList().push_back(New);

      // Loop over all of the PHI nodes in the block, changing them to use the
      // incoming values from the previous block.
      if (*BB == Header)
        for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
          PHINode *NewPHI = cast<PHINode>(VMap[OrigPHINode[i]]);
          Value *InVal = NewPHI->getIncomingValueForBlock(LatchBlock);
          if (Instruction *InValI = dyn_cast<Instruction>(InVal))
            if (It > 1 && L->contains(InValI))
              InVal = LastValueMap[InValI];
          VMap[OrigPHINode[i]] = InVal;
          New->getInstList().erase(NewPHI);
        }

      // Update our running map of newest clones
      LastValueMap[*BB] = New;
      for (ValueToValueMapTy::iterator VI = VMap.begin(), VE = VMap.end();
           VI != VE; ++VI)
        LastValueMap[VI->first] = VI->second;

      L->addBasicBlockToLoop(New, LI->getBase());

      // Add phi entries for newly created values to all exit blocks.
      for (succ_iterator SI = succ_begin(*BB), SE = succ_end(*BB);
           SI != SE; ++SI) {
        if (L->contains(*SI))
          continue;
        for (BasicBlock::iterator BBI = (*SI)->begin();
             PHINode *phi = dyn_cast<PHINode>(BBI); ++BBI) {
          Value *Incoming = phi->getIncomingValueForBlock(*BB);
          ValueToValueMapTy::iterator It = LastValueMap.find(Incoming);
          if (It != LastValueMap.end())
            Incoming = It->second;
          phi->addIncoming(Incoming, New);
        }
      }
      // Keep track of new headers and latches as we create them, so that
      // we can insert the proper branches later.
      if (*BB == Header)
        Headers.push_back(New);
      if (*BB == LatchBlock)
        Latches.push_back(New);

      NewBlocks.push_back(New);
    }

    // Remap all instructions in the most recent iteration
    for (unsigned i = 0; i < NewBlocks.size(); ++i)
      for (BasicBlock::iterator I = NewBlocks[i]->begin(),
           E = NewBlocks[i]->end(); I != E; ++I)
        ::RemapInstruction(I, LastValueMap);
  }

  // Loop over the PHI nodes in the original block, setting incoming values.
  for (unsigned i = 0, e = OrigPHINode.size(); i != e; ++i) {
    PHINode *PN = OrigPHINode[i];
    if (CompletelyUnroll) {
      PN->replaceAllUsesWith(PN->getIncomingValueForBlock(Preheader));
      Header->getInstList().erase(PN);
    }
    else if (Count > 1) {
      Value *InVal = PN->removeIncomingValue(LatchBlock, false);
      // If this value was defined in the loop, take the value defined by the
      // last iteration of the loop.
      if (Instruction *InValI = dyn_cast<Instruction>(InVal)) {
        if (L->contains(InValI))
          InVal = LastValueMap[InVal];
      }
      assert(Latches.back() == LastValueMap[LatchBlock] && "bad last latch");
      PN->addIncoming(InVal, Latches.back());
    }
  }

  // Now that all the basic blocks for the unrolled iterations are in place,
  // set up the branches to connect them.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    // The original branch was replicated in each unrolled iteration.
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());

    // The branch destination.
    unsigned j = (i + 1) % e;
    BasicBlock *Dest = Headers[j];
    bool NeedConditional = true;

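    // With a run-time trip count, UnrollRuntimeLoopProlog has already peeled
    // the remainder iterations into a prolog, so only the copy carrying the
    // back-edge (j == 0) needs to keep the exit test.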
    if (RuntimeTripCount && j != 0) {
      NeedConditional = false;
    }

    // For a complete unroll, make the last iteration end with a branch
    // to the exit block.
    if (CompletelyUnroll && j == 0) {
      Dest = LoopExit;
      NeedConditional = false;
    }

    // If we know the trip count or a multiple of it, we can safely use an
    // unconditional branch for some iterations.
    if (j != BreakoutTrip && (TripMultiple == 0 || j % TripMultiple != 0)) {
      NeedConditional = false;
    }

    if (NeedConditional) {
      // Update the conditional branch's successor for the following
      // iteration.
      Term->setSuccessor(!ContinueOnTrue, Dest);
    } else {
      // Remove phi operands at this loop exit
      if (Dest != LoopExit) {
        BasicBlock *BB = Latches[i];
        for (succ_iterator SI = succ_begin(BB), SE = succ_end(BB);
             SI != SE; ++SI) {
          if (*SI == Headers[i])
            continue;
          for (BasicBlock::iterator BBI = (*SI)->begin();
               PHINode *Phi = dyn_cast<PHINode>(BBI); ++BBI) {
            Phi->removeIncomingValue(BB, false);
          }
        }
      }
      // Replace the conditional branch with an unconditional one.
      BranchInst::Create(Dest, Term);
      Term->eraseFromParent();
    }
  }

  // Merge adjacent basic blocks, if possible.
  for (unsigned i = 0, e = Latches.size(); i != e; ++i) {
    BranchInst *Term = cast<BranchInst>(Latches[i]->getTerminator());
    if (Term->isUnconditional()) {
      BasicBlock *Dest = Term->getSuccessor(0);
      if (BasicBlock *Fold = FoldBlockIntoPredecessor(Dest, LI, LPM))
        std::replace(Latches.begin(), Latches.end(), Dest, Fold);
    }
  }

  DominatorTree *DT = nullptr;
  if (PP) {
    // FIXME: Reconstruct dom info, because it is not preserved properly.
    // Incrementally updating domtree after loop unrolling would be easy.
    if (DominatorTreeWrapperPass *DTWP =
            PP->getAnalysisIfAvailable<DominatorTreeWrapperPass>()) {
      DT = &DTWP->getDomTree();
      DT->recalculate(*L->getHeader()->getParent());
    }

    // Simplify any new induction variables in the partially unrolled loop.
    ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
    if (SE && !CompletelyUnroll) {
      SmallVector<WeakVH, 16> DeadInsts;
      simplifyLoopIVs(L, SE, LPM, DeadInsts);

      // Aggressively clean up dead instructions that simplifyLoopIVs already
      // identified. Any remaining should be cleaned up below.
      while (!DeadInsts.empty())
        if (Instruction *Inst =
                dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val()))
          RecursivelyDeleteTriviallyDeadInstructions(Inst);
    }
  }
  // At this point, the code is well formed. We now do a quick sweep over the
  // inserted code, doing constant propagation and dead code elimination as we
  // go.
  const std::vector<BasicBlock*> &NewLoopBlocks = L->getBlocks();
  for (std::vector<BasicBlock*>::const_iterator BB = NewLoopBlocks.begin(),
       BBE = NewLoopBlocks.end(); BB != BBE; ++BB)
    for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end(); I != E; ) {
      Instruction *Inst = I++;

      if (isInstructionTriviallyDead(Inst))
        (*BB)->getInstList().erase(Inst);
      else if (Value *V = SimplifyInstruction(Inst))
        if (LI->replacementPreservesLCSSAForm(Inst, V)) {
          Inst->replaceAllUsesWith(V);
          (*BB)->getInstList().erase(Inst);
        }
    }

  NumCompletelyUnrolled += CompletelyUnroll;
  ++NumUnrolled;

  Loop *OuterL = L->getParentLoop();
  // Remove the loop from the LoopPassManager if it's completely removed.
  if (CompletelyUnroll && LPM != nullptr)
    LPM->deleteLoopFromQueue(L);

  // If we have a pass and a DominatorTree we should re-simplify impacted loops
  // to ensure subsequent analyses can rely on this form. We want to simplify
  // at least one layer outside of the loop that was unrolled so that any
  // changes to the parent loop exposed by the unrolling are considered.
  if (PP && DT) {
    if (!OuterL && !CompletelyUnroll)
      OuterL = L;
    if (OuterL) {
      DataLayoutPass *DLP = PP->getAnalysisIfAvailable<DataLayoutPass>();
      const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
      ScalarEvolution *SE = PP->getAnalysisIfAvailable<ScalarEvolution>();
      simplifyLoop(OuterL, DT, LI, PP, /*AliasAnalysis*/ nullptr, SE, DL);

      // LCSSA must be performed on the outermost affected loop. The unrolled
      // loop's last loop latch is guaranteed to be in the outermost loop after
      // deleteLoopFromQueue updates LoopInfo.
      Loop *LatchLoop = LI->getLoopFor(Latches.back());
      if (!OuterL->contains(LatchLoop))
        while (OuterL->getParentLoop() != LatchLoop)
          OuterL = OuterL->getParentLoop();

      formLCSSARecursively(*OuterL, *DT, SE);
    }
  }

  return true;
}