//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible. It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe. This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops. If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop. This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer. There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable. We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
//
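// As a purely illustrative sketch (source-level pseudocode, not drawn from any
// particular test case), given a loop such as:
//
//    for (i = 0; i != n; ++i) { t = a + b; *P += t; }
//
// hoisting moves the invariant computation "a + b" into the preheader, and
// scalar promotion rewrites the accesses to *P to use a register, roughly:
//
//    t = a + b; tmp = *P; for (i = 0; i != n; ++i) tmp += t; *P = tmp;
//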
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "licm"
#include "llvm/Transforms/Scalar.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumSunk      , "Number of instructions sunk out of loop");
STATISTIC(NumHoisted   , "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted  , "Number of memory locations promoted to registers");

static cl::opt<bool>
DisablePromotion("disable-licm-promotion", cl::Hidden,
                 cl::desc("Disable memory promotion in LICM pass"));
namespace {
  struct LICM : public LoopPass {
    static char ID; // Pass identification, replacement for typeid
    LICM() : LoopPass(ID) {
      initializeLICMPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnLoop(Loop *L, LPPassManager &LPM);

    /// This transformation requires natural loop information & requires that
    /// loop preheaders be inserted into the CFG...
    ///
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<LoopInfo>();
      AU.addRequiredID(LoopSimplifyID);
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved("scalar-evolution");
      AU.addPreservedID(LoopSimplifyID);
      AU.addRequired<TargetLibraryInfo>();
    }

    using llvm::Pass::doFinalization;

    bool doFinalization() {
      assert(LoopToAliasSetMap.empty() && "Didn't free loop alias sets");
      return false;
    }

  private:
    AliasAnalysis *AA;       // Current AliasAnalysis information
    LoopInfo *LI;            // Current LoopInfo
    DominatorTree *DT;       // Dominator Tree for the current Loop.

    DataLayout *TD;          // DataLayout for constant folding.
    TargetLibraryInfo *TLI;  // TargetLibraryInfo for constant folding.

    // State that is updated as we process loops.
    bool Changed;            // Set to true when we change anything.
    BasicBlock *Preheader;   // The preheader block of the current loop...
    Loop *CurLoop;           // The current loop we are working on...
    AliasSetTracker *CurAST; // AliasSet information for the current loop...
    bool MayThrow;           // The current loop contains an instruction which
                             // may throw, thus preventing code motion of
                             // instructions with side effects.
    DenseMap<Loop*, AliasSetTracker*> LoopToAliasSetMap;

    /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
    void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L);

    /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
    /// set.
    void deleteAnalysisValue(Value *V, Loop *L);

    /// SinkRegion - Walk the specified region of the CFG (defined by all blocks
    /// dominated by the specified block, and that are in the current loop) in
    /// reverse depth first order w.r.t the DominatorTree. This allows us to
    /// visit uses before definitions, allowing us to sink a loop body in one
    /// pass without iteration.
    ///
    void SinkRegion(DomTreeNode *N);

    /// HoistRegion - Walk the specified region of the CFG (defined by all
    /// blocks dominated by the specified block, and that are in the current
    /// loop) in depth first order w.r.t the DominatorTree. This allows us to
    /// visit definitions before uses, allowing us to hoist a loop body in one
    /// pass without iteration.
    ///
    void HoistRegion(DomTreeNode *N);

    /// inSubLoop - Little predicate that returns true if the specified basic
    /// block is in a subloop of the current one, not the current one itself.
    ///
    bool inSubLoop(BasicBlock *BB) {
      assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
      return LI->getLoopFor(BB) != CurLoop;
    }

    /// sink - When an instruction is found to only be used outside of the loop,
    /// this function moves it to the exit blocks and patches up SSA form as
    /// needed.
    ///
    void sink(Instruction &I);

    /// hoist - When an instruction is found to use only loop invariant operands
    /// and it is safe to hoist, this function is called to do the dirty work.
    ///
    void hoist(Instruction &I);

    /// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it
    /// is not a trapping instruction or if it is a trapping instruction and is
    /// guaranteed to execute.
    ///
    bool isSafeToExecuteUnconditionally(Instruction &I);

    /// isGuaranteedToExecute - Check that the instruction is guaranteed to
    /// execute.
    ///
    bool isGuaranteedToExecute(Instruction &I);

    /// pointerInvalidatedByLoop - Return true if the body of this loop may
    /// store into the memory location pointed to by V.
    ///
    bool pointerInvalidatedByLoop(Value *V, uint64_t Size,
                                  const MDNode *TBAAInfo) {
      // Check to see if any of the basic blocks in CurLoop invalidate *V.
      return CurAST->getAliasSetForPointer(V, Size, TBAAInfo).isMod();
    }

    bool canSinkOrHoistInst(Instruction &I);
    bool isNotUsedInLoop(Instruction &I);

    void PromoteAliasSet(AliasSet &AS,
                         SmallVectorImpl<BasicBlock*> &ExitBlocks,
                         SmallVectorImpl<Instruction*> &InsertPts);
  };
}

char LICM::ID = 0;
INITIALIZE_PASS_BEGIN(LICM, "licm", "Loop Invariant Code Motion", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(LICM, "licm", "Loop Invariant Code Motion", false, false)

Pass *llvm::createLICMPass() { return new LICM(); }

/// Hoist expressions out of the specified loop. Note that alias info for inner
/// loops is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
///
bool LICM::runOnLoop(Loop *L, LPPassManager &LPM) {
  Changed = false;

  // Get our Loop and Alias Analysis information...
  LI = &getAnalysis<LoopInfo>();
  AA = &getAnalysis<AliasAnalysis>();
  DT = &getAnalysis<DominatorTree>();

  TD = getAnalysisIfAvailable<DataLayout>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  CurAST = new AliasSetTracker(*AA);
  // Collect Alias info from subloops.
  for (Loop::iterator LoopItr = L->begin(), LoopItrE = L->end();
       LoopItr != LoopItrE; ++LoopItr) {
    Loop *InnerL = *LoopItr;
    AliasSetTracker *InnerAST = LoopToAliasSetMap[InnerL];
    assert(InnerAST && "Where is my AST?");

    // What if InnerLoop was modified by other passes?
    CurAST->add(*InnerAST);

    // Once we've incorporated the inner loop's AST into ours, we don't need the
    // subloop's anymore.
    delete InnerAST;
    LoopToAliasSetMap.erase(InnerL);
  }

  CurLoop = L;

  // Get the preheader block to move instructions into...
  Preheader = L->getLoopPreheader();

  // Loop over the body of this loop, looking for calls, invokes, and stores.
  // Because subloops have already been incorporated into AST, we skip blocks in
  // subloops.
  //
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;
    if (LI->getLoopFor(BB) == L)        // Ignore blocks in subloops.
      CurAST->add(*BB);                 // Incorporate the specified basic block
  }

  MayThrow = false;
  // TODO: We've already searched for instructions which may throw in subloops.
  // We may want to reuse this information.
  for (Loop::block_iterator BB = L->block_begin(), BBE = L->block_end();
       (BB != BBE) && !MayThrow ; ++BB)
    for (BasicBlock::iterator I = (*BB)->begin(), E = (*BB)->end();
         (I != E) && !MayThrow; ++I)
      MayThrow |= I->mayThrow();

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses. This allows
  // us to sink instructions in one pass, without iteration. After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  //
  if (L->hasDedicatedExits())
    SinkRegion(DT->getNode(L->getHeader()));
  if (Preheader)
    HoistRegion(DT->getNode(L->getHeader()));

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits()) {
    SmallVector<BasicBlock *, 8> ExitBlocks;
    SmallVector<Instruction *, 8> InsertPts;

    // Loop over all of the alias sets in the tracker object.
    for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
         I != E; ++I)
      PromoteAliasSet(*I, ExitBlocks, InsertPts);
  }

  // Clear out loop state information for the next iteration.
  CurLoop = 0;
  Preheader = 0;

  // If this loop is nested inside of another one, save the alias information
  // for when we process the outer loop.
  if (L->getParentLoop())
    LoopToAliasSetMap[L] = CurAST;
  else
    delete CurAST;
  return Changed;
}

/// SinkRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in
/// reverse depth first order w.r.t the DominatorTree. This allows us to visit
/// uses before definitions, allowing us to sink a loop body in one pass without
/// iteration.
///
void LICM::SinkRegion(DomTreeNode *N) {
  assert(N != 0 && "Null dominator tree node?");
  BasicBlock *BB = N->getBlock();

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return;

  // We are processing blocks in reverse dfo, so process children first.
  const std::vector<DomTreeNode*> &Children = N->getChildren();
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    SinkRegion(Children[i]);

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (inSubLoop(BB)) return;

  for (BasicBlock::iterator II = BB->end(); II != BB->begin(); ) {
    Instruction &I = *--II;

    // If the instruction is dead, we would normally try to sink it because it
    // isn't used in the loop; instead, just delete it.
    if (isInstructionTriviallyDead(&I, TLI)) {
      DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
      ++II;
      CurAST->deleteValue(&I);
      I.eraseFromParent();
      Changed = true;
      continue;
    }

    // Check to see if we can sink this instruction to the exit blocks
    // of the loop. We can do this if all users of the instruction are
    // outside of the loop. In this case, it doesn't even matter if the
    // operands of the instruction are loop invariant.
    //
    if (isNotUsedInLoop(I) && canSinkOrHoistInst(I)) {
      ++II;
      sink(I);
    }
  }
}

/// HoistRegion - Walk the specified region of the CFG (defined by all blocks
/// dominated by the specified block, and that are in the current loop) in depth
/// first order w.r.t the DominatorTree. This allows us to visit definitions
/// before uses, allowing us to hoist a loop body in one pass without iteration.
///
void LICM::HoistRegion(DomTreeNode *N) {
  assert(N != 0 && "Null dominator tree node?");
  BasicBlock *BB = N->getBlock();

  // If this subregion is not in the top level loop at all, exit.
  if (!CurLoop->contains(BB)) return;

  // Only need to process the contents of this block if it is not part of a
  // subloop (which would already have been processed).
  if (!inSubLoop(BB))
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E; ) {
      Instruction &I = *II++;

      // Try constant folding this instruction. If all the operands are
      // constants, it is technically hoistable, but it would be better to just
      // fold it.
      if (Constant *C = ConstantFoldInstruction(&I, TD, TLI)) {
        DEBUG(dbgs() << "LICM folding inst: " << I << " --> " << *C << '\n');
        CurAST->copyValue(&I, C);
        CurAST->deleteValue(&I);
        I.replaceAllUsesWith(C);
        I.eraseFromParent();
        continue;
      }

      // Try hoisting the instruction out to the preheader. We can only do this
      // if all of the operands of the instruction are loop invariant and if it
      // is safe to hoist the instruction.
      //
      if (CurLoop->hasLoopInvariantOperands(&I) && canSinkOrHoistInst(I) &&
          isSafeToExecuteUnconditionally(I))
        hoist(I);
    }

  const std::vector<DomTreeNode*> &Children = N->getChildren();
  for (unsigned i = 0, e = Children.size(); i != e; ++i)
    HoistRegion(Children[i]);
}

/// canSinkOrHoistInst - Return true if the hoister and sinker can handle this
/// instruction.
///
bool LICM::canSinkOrHoistInst(Instruction &I) {
  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false;        // Don't hoist volatile/atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->getMetadata("invariant.load"))
      return true;

    // Don't hoist loads which have may-aliased stores in the loop.
    uint64_t Size = 0;
    if (LI->getType()->isSized())
      Size = AA->getTypeStoreSize(LI->getType());
    return !pointerInvalidatedByLoop(LI->getOperand(0), Size,
                                     LI->getMetadata(LLVMContext::MD_tbaa));
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Handle simple cases by querying alias analysis.
    AliasAnalysis::ModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == AliasAnalysis::DoesNotAccessMemory)
      return true;
    if (AliasAnalysis::onlyReadsMemory(Behavior)) {
      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      bool FoundMod = false;
      for (AliasSetTracker::iterator I = CurAST->begin(), E = CurAST->end();
           I != E; ++I) {
        AliasSet &AS = *I;
        if (!AS.isForwardingAliasSet() && AS.isMod()) {
          FoundMod = true;
          break;
        }
      }
      if (!FoundMod) return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  }

  // Only these instructions are hoistable/sinkable.
  if (!isa<BinaryOperator>(I) && !isa<CastInst>(I) && !isa<SelectInst>(I) &&
      !isa<GetElementPtrInst>(I) && !isa<CmpInst>(I) &&
      !isa<InsertElementInst>(I) && !isa<ExtractElementInst>(I) &&
      !isa<ShuffleVectorInst>(I) && !isa<ExtractValueInst>(I) &&
      !isa<InsertValueInst>(I))
    return false;

  return isSafeToExecuteUnconditionally(I);
}

/// isNotUsedInLoop - Return true if the only users of this instruction are
/// outside of the loop. If this is true, we can sink the instruction to the
/// exit blocks of the loop.
///
bool LICM::isNotUsedInLoop(Instruction &I) {
  for (Value::use_iterator UI = I.use_begin(), E = I.use_end(); UI != E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);
    if (PHINode *PN = dyn_cast<PHINode>(User)) {
      // PHI node uses occur in predecessor blocks!
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        if (PN->getIncomingValue(i) == &I)
          if (CurLoop->contains(PN->getIncomingBlock(i)))
            return false;
    } else if (CurLoop->contains(User)) {
      return false;
    }
  }
  return true;
}


/// sink - When an instruction is found to only be used outside of the loop,
/// this function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
void LICM::sink(Instruction &I) {
  DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");

  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);

  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumSunk;
  Changed = true;

  // The case where there is only a single exit node of this loop is common
  // enough that we handle it as a special (more efficient) case. It is more
  // efficient to handle because there are no PHI nodes that need to be placed.
  if (ExitBlocks.size() == 1) {
    if (!DT->dominates(I.getParent(), ExitBlocks[0])) {
      // Instruction is not used, just delete it.
      CurAST->deleteValue(&I);
      // If I has users in unreachable blocks, eliminate it. If I is not of
      // void type, replaceAllUsesWith undef first; this allows ValueHandles
      // and custom metadata to adjust themselves.
      if (!I.use_empty())
        I.replaceAllUsesWith(UndefValue::get(I.getType()));
      I.eraseFromParent();
    } else {
      // Move the instruction to the start of the exit block, after any PHI
      // nodes in it.
      I.moveBefore(ExitBlocks[0]->getFirstInsertionPt());

      // This instruction is no longer in the AST for the current loop, because
      // we just sunk it out of the loop. If we just sunk it into an outer
      // loop, we will rediscover the operation when we process it.
      CurAST->deleteValue(&I);
    }
    return;
  }

  if (ExitBlocks.empty()) {
    // The instruction is actually dead if there ARE NO exit blocks.
    CurAST->deleteValue(&I);
    // If I has users in unreachable blocks, eliminate it. If I is not of
    // void type, replaceAllUsesWith undef first; this allows ValueHandles
    // and custom metadata to adjust themselves.
    if (!I.use_empty())
      I.replaceAllUsesWith(UndefValue::get(I.getType()));
    I.eraseFromParent();
    return;
  }

  // Otherwise, if we have multiple exits, use the SSAUpdater to do all of the
  // hard work of inserting PHI nodes as necessary.
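  //
  // For illustration only (hypothetical block and value names, not taken from
  // a specific test): if "%inv = add i32 %a, %b" is sunk from a loop with two
  // dominated exit blocks, the original instruction lands in the first exit
  // and a clone named "%inv.le" lands in the second; SSAUpdater then inserts
  // a PHI at any later join point, e.g.
  //
  //    %p = phi i32 [ %inv, %exit1 ], [ %inv.le, %exit2 ]
  //
  // so users outside the loop still see a single SSA value.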
  SmallVector<PHINode*, 8> NewPHIs;
  SSAUpdater SSA(&NewPHIs);

  if (!I.use_empty())
    SSA.Initialize(I.getType(), I.getName());

  // Insert a copy of the instruction in each exit block of the loop that is
  // dominated by the instruction. Each exit block is known to only be in the
  // ExitBlocks list once.
  BasicBlock *InstOrigBB = I.getParent();
  unsigned NumInserted = 0;

  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i) {
    BasicBlock *ExitBlock = ExitBlocks[i];

    if (!DT->dominates(InstOrigBB, ExitBlock))
      continue;

    // Insert the code after the last PHI node.
    BasicBlock::iterator InsertPt = ExitBlock->getFirstInsertionPt();

    // If this is the first exit block processed, just move the original
    // instruction, otherwise clone the original instruction and insert
    // the copy.
    Instruction *New;
    if (NumInserted++ == 0) {
      I.moveBefore(InsertPt);
      New = &I;
    } else {
      New = I.clone();
      if (!I.getName().empty())
        New->setName(I.getName()+".le");
      ExitBlock->getInstList().insert(InsertPt, New);
    }

    // Now that we have inserted the instruction, inform SSAUpdater.
    if (!I.use_empty())
      SSA.AddAvailableValue(ExitBlock, New);
  }

  // If the instruction doesn't dominate any exit blocks, it must be dead.
  if (NumInserted == 0) {
    CurAST->deleteValue(&I);
    if (!I.use_empty())
      I.replaceAllUsesWith(UndefValue::get(I.getType()));
    I.eraseFromParent();
    return;
  }

  // Next, rewrite uses of the instruction, inserting PHI nodes as needed.
  for (Value::use_iterator UI = I.use_begin(), UE = I.use_end(); UI != UE; ) {
    // Grab the use before incrementing the iterator.
    Use &U = UI.getUse();
    // Increment the iterator before removing the use from the list.
    ++UI;
    SSA.RewriteUseAfterInsertions(U);
  }

  // Update CurAST for NewPHIs if I had pointer type.
  if (I.getType()->isPointerTy())
    for (unsigned i = 0, e = NewPHIs.size(); i != e; ++i)
      CurAST->copyValue(&I, NewPHIs[i]);

  // Finally, remove the instruction from CurAST. It is no longer in the loop.
  CurAST->deleteValue(&I);
}

/// hoist - When an instruction is found to use only loop invariant operands
/// and it is safe to hoist, this function is called to do the dirty work.
///
void LICM::hoist(Instruction &I) {
  DEBUG(dbgs() << "LICM hoisting to " << Preheader->getName() << ": "
        << I << "\n");

  // Move the new node to the Preheader, before its terminator.
  I.moveBefore(Preheader->getTerminator());

  if (isa<LoadInst>(I)) ++NumMovedLoads;
  else if (isa<CallInst>(I)) ++NumMovedCalls;
  ++NumHoisted;
  Changed = true;
}

/// isSafeToExecuteUnconditionally - Only sink or hoist an instruction if it is
/// not a trapping instruction or if it is a trapping instruction and is
/// guaranteed to execute.
///
bool LICM::isSafeToExecuteUnconditionally(Instruction &Inst) {
  // If it is not a trapping instruction, it is always safe to hoist.
  if (isSafeToSpeculativelyExecute(&Inst))
    return true;

  return isGuaranteedToExecute(Inst);
}

bool LICM::isGuaranteedToExecute(Instruction &Inst) {

  // Somewhere in this loop there is an instruction which may throw and make us
  // exit the loop.
  if (MayThrow)
    return false;

  // Otherwise we have to check to make sure that the instruction dominates all
  // of the exit blocks. If it doesn't, then there is a path out of the loop
  // which does not execute this instruction, so we can't hoist it.

  // If the instruction is in the header block for the loop (which is very
  // common), it is always guaranteed to dominate the exit blocks. Since this
  // is a common case, and can save some work, check it now.
  if (Inst.getParent() == CurLoop->getHeader())
    return true;

  // Get the exit blocks for the current loop.
  SmallVector<BasicBlock*, 8> ExitBlocks;
  CurLoop->getExitBlocks(ExitBlocks);

  // Verify that the block dominates each of the exit blocks of the loop.
  for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
    if (!DT->dominates(Inst.getParent(), ExitBlocks[i]))
      return false;

  // As a degenerate case, if the loop is statically infinite then we haven't
  // proven anything since there are no exit blocks.
  if (ExitBlocks.empty())
    return false;

  return true;
}

namespace {
  class LoopPromoter : public LoadAndStorePromoter {
    Value *SomePtr;  // Designated pointer to store to.
    SmallPtrSet<Value*, 4> &PointerMustAliases;
    SmallVectorImpl<BasicBlock*> &LoopExitBlocks;
    SmallVectorImpl<Instruction*> &LoopInsertPts;
    AliasSetTracker &AST;
    DebugLoc DL;
    int Alignment;
    MDNode *TBAATag;
  public:
    LoopPromoter(Value *SP,
                 const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
                 SmallPtrSet<Value*, 4> &PMA,
                 SmallVectorImpl<BasicBlock*> &LEB,
                 SmallVectorImpl<Instruction*> &LIP,
                 AliasSetTracker &ast, DebugLoc dl, int alignment,
                 MDNode *TBAATag)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP),
        PointerMustAliases(PMA), LoopExitBlocks(LEB), LoopInsertPts(LIP),
        AST(ast), DL(dl), Alignment(alignment), TBAATag(TBAATag) {}

    virtual bool isInstInList(Instruction *I,
                              const SmallVectorImpl<Instruction*> &) const {
      Value *Ptr;
      if (LoadInst *LI = dyn_cast<LoadInst>(I))
        Ptr = LI->getOperand(0);
      else
        Ptr = cast<StoreInst>(I)->getPointerOperand();
      return PointerMustAliases.count(Ptr);
    }

    virtual void doExtraRewritesBeforeFinalDeletion() const {
      // Insert stores in the loop exit blocks. Each exit block gets a store of
      // the live-out value that feeds it. Since we've already told the SSA
      // updater about the defs in the loop and the preheader definition, it is
      // all set and we can start using it.
      for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
        BasicBlock *ExitBlock = LoopExitBlocks[i];
        Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
        Instruction *InsertPos = LoopInsertPts[i];
        StoreInst *NewSI = new StoreInst(LiveInValue, SomePtr, InsertPos);
        NewSI->setAlignment(Alignment);
        NewSI->setDebugLoc(DL);
        if (TBAATag) NewSI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
      }
    }

    virtual void replaceLoadWithValue(LoadInst *LI, Value *V) const {
      // Update alias analysis.
      AST.copyValue(LI, V);
    }
    virtual void instructionDeleted(Instruction *I) const {
      AST.deleteValue(I);
    }
  };
} // end anon namespace

/// PromoteAliasSet - Try to promote memory values to scalars by sinking
/// stores out of the loop and moving loads to before the loop. We do this by
/// looping over the stores in the loop, looking for stores to Must pointers
/// which are loop invariant.
///
void LICM::PromoteAliasSet(AliasSet &AS,
                           SmallVectorImpl<BasicBlock*> &ExitBlocks,
                           SmallVectorImpl<Instruction*> &InsertPts) {
  // We can promote this alias set if it has a store, if it is a "Must" alias
  // set, if the pointer is loop invariant, and if we are not eliminating any
  // volatile loads or stores.
  if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
      AS.isVolatile() || !CurLoop->isLoopInvariant(AS.begin()->getValue()))
    return;

  assert(!AS.empty() &&
         "Must alias set should have at least one pointer element in it!");
  Value *SomePtr = AS.begin()->getValue();

  // It isn't safe to promote a load/store from the loop if the load/store is
  // conditional. For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // It is safe to promote P if all uses are direct load/stores and if at
  // least one is guaranteed to be executed.
  bool GuaranteedToExecute = false;

  SmallVector<Instruction*, 64> LoopUses;
  SmallPtrSet<Value*, 4> PointerMustAliases;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  unsigned Alignment = 1;
  MDNode *TBAATag = 0;

  // Check that all of the pointers in the alias set have the same type. We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes. While we are at it, collect alignment and TBAA info.
  for (AliasSet::iterator ASI = AS.begin(), E = AS.end(); ASI != E; ++ASI) {
    Value *ASIV = ASI->getValue();
    PointerMustAliases.insert(ASIV);

    // Check that all of the pointers in the alias set have the same type. We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return;

    for (Value::use_iterator UI = ASIV->use_begin(), UE = ASIV->use_end();
         UI != UE; ++UI) {
      // Ignore instructions that are outside the loop.
      Instruction *Use = dyn_cast<Instruction>(*UI);
      if (!Use || !CurLoop->contains(Use))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *load = dyn_cast<LoadInst>(Use)) {
        assert(!load->isVolatile() && "AST broken");
        if (!load->isSimple())
          return;
      } else if (StoreInst *store = dyn_cast<StoreInst>(Use)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (Use->getOperand(1) != ASIV)
          continue;
        assert(!store->isVolatile() && "AST broken");
        if (!store->isSimple())
          return;

        // Note that we only check GuaranteedToExecute inside the store case
        // so that we do not introduce stores where they did not exist before
        // (which would break the LLVM concurrency model).

        // If the alignment of this instruction allows us to specify a more
        // restrictive (and performant) alignment and if we are sure this
        // instruction will be executed, update the alignment.
        // Larger is better, with the exception of 0 being the best alignment.
        unsigned InstAlignment = store->getAlignment();
        if ((InstAlignment > Alignment || InstAlignment == 0) && Alignment != 0)
          if (isGuaranteedToExecute(*Use)) {
            GuaranteedToExecute = true;
            Alignment = InstAlignment;
          }

        if (!GuaranteedToExecute)
          GuaranteedToExecute = isGuaranteedToExecute(*Use);

      } else
        return; // Not a load or store.

      // Merge the TBAA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its TBAA tag.
        TBAATag = Use->getMetadata(LLVMContext::MD_tbaa);
      } else if (TBAATag) {
        TBAATag = MDNode::getMostGenericTBAA(TBAATag,
                                       Use->getMetadata(LLVMContext::MD_tbaa));
      }

      LoopUses.push_back(Use);
    }
  }

  // If there isn't a guaranteed-to-execute instruction, we can't promote.
  if (!GuaranteedToExecute)
    return;

  // Otherwise, this is safe to promote; let's do it!
  DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " <<*SomePtr<<'\n');
  Changed = true;
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  // Figure out the loop exits and their insertion points, if this is the
  // first promotion.
  if (ExitBlocks.empty()) {
    CurLoop->getUniqueExitBlocks(ExitBlocks);
    InsertPts.resize(ExitBlocks.size());
    for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
      InsertPts[i] = ExitBlocks[i]->getFirstInsertionPt();
  }

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode*, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, *CurAST, DL, Alignment, TBAATag);

  // Set up the preheader to have a definition of the value. It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad =
    new LoadInst(SomePtr, SomePtr->getName()+".promoted",
                 Preheader->getTerminator());
  PreheaderLoad->setAlignment(Alignment);
  PreheaderLoad->setDebugLoc(DL);
  if (TBAATag) PreheaderLoad->setMetadata(LLVMContext::MD_tbaa, TBAATag);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    PreheaderLoad->eraseFromParent();
}


/// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
void LICM::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To, Loop *L) {
  AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
  if (!AST)
    return;

  AST->copyValue(From, To);
}

/// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
/// set.
void LICM::deleteAnalysisValue(Value *V, Loop *L) {
  AliasSetTracker *AST = LoopToAliasSetMap.lookup(L);
  if (!AST)
    return;

  AST->deleteValue(V);
}