//===-- LICM.cpp - Loop Invariant Code Motion Pass ------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs loop invariant code motion, attempting to remove as much
// code from the body of a loop as possible.  It does this by either hoisting
// code into the preheader block, or by sinking code to the exit blocks if it is
// safe.  This pass also promotes must-aliased memory locations in the loop to
// live in registers, thus hoisting and sinking "invariant" loads and stores.
//
// This pass uses alias analysis for two purposes:
//
//  1. Moving loop invariant loads and calls out of loops.  If we can determine
//     that a load or call inside of a loop never aliases anything stored to,
//     we can hoist it or sink it like any other instruction.
//  2. Scalar Promotion of Memory - If there is a store instruction inside of
//     the loop, we try to move the store to happen AFTER the loop instead of
//     inside of the loop.  This can only happen if a few conditions are true:
//       A. The pointer stored through is loop invariant
//       B. There are no stores or loads in the loop which _may_ alias the
//          pointer.  There are no calls in the loop which mod/ref the pointer.
//     If these conditions are true, we can promote the loads and stores in the
//     loop of the pointer to use a temporary alloca'd variable.  We then use
//     the SSAUpdater to construct the appropriate SSA form for the value.
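//
//     As a purely illustrative (hypothetical) example, promotion rewrites
//
//       for.body:
//         %v = load i32, i32* @g
//         %inc = add i32 %v, 1
//         store i32 %inc, i32* @g
//
//     into a loop that carries the value in a register (via a PHI), with a
//     single load of @g in the preheader and a single store to @g after the
//     loop, assuming nothing else in the loop may alias @g.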
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/LICM.h"
#include "llvm/ADT/SetOperations.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/GuardUtils.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/LoopIterator.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/PredIteratorCache.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Scalar/LoopPassManager.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/LoopUtils.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include <algorithm>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "licm"

STATISTIC(NumCreatedBlocks, "Number of blocks created");
STATISTIC(NumClonedBranches, "Number of branches cloned");
STATISTIC(NumSunk, "Number of instructions sunk out of loop");
STATISTIC(NumHoisted, "Number of instructions hoisted out of loop");
STATISTIC(NumMovedLoads, "Number of load insts hoisted or sunk");
STATISTIC(NumMovedCalls, "Number of call insts hoisted or sunk");
STATISTIC(NumPromoted, "Number of memory locations promoted to registers");

/// Memory promotion is enabled by default.
static cl::opt<bool>
    DisablePromotion("disable-licm-promotion", cl::Hidden, cl::init(false),
                     cl::desc("Disable memory promotion in LICM pass"));

static cl::opt<bool> ControlFlowHoisting(
    "licm-control-flow-hoisting", cl::Hidden, cl::init(false),
    cl::desc("Enable control flow (and PHI) hoisting in LICM"));

static cl::opt<uint32_t> MaxNumUsesTraversed(
    "licm-max-num-uses-traversed", cl::Hidden, cl::init(8),
    cl::desc("Max num uses visited for identifying load "
             "invariance in loop using invariant start (default = 8)"));

// Default value of zero implies we use the regular alias set tracker mechanism
// instead of the cross product using AA to identify aliasing of the memory
// location we are interested in.
static cl::opt<int>
LICMN2Theshold("licm-n2-threshold", cl::Hidden, cl::init(0),
               cl::desc("How many instructions to cross product using AA"));
110 
111 // Experimental option to allow imprecision in LICM in pathological cases, in
112 // exchange for faster compile. This is to be removed if MemorySSA starts to
113 // address the same issue. This flag applies only when LICM uses MemorySSA
114 // instead on AliasSetTracker. LICM calls MemorySSAWalker's
115 // getClobberingMemoryAccess, up to the value of the Cap, getting perfect
116 // accuracy. Afterwards, LICM will call into MemorySSA's getDefiningAccess,
117 // which may not be precise, since optimizeUses is capped. The result is
118 // correct, but we may not get as "far up" as possible to get which access is
119 // clobbering the one queried.
120 cl::opt<unsigned> llvm::SetLicmMssaOptCap(
121     "licm-mssa-optimization-cap", cl::init(100), cl::Hidden,
122     cl::desc("Enable imprecision in LICM in pathological cases, in exchange "
123              "for faster compile. Caps the MemorySSA clobbering calls."));

// Experimentally, memory promotion carries less importance than sinking and
// hoisting. Limit when we do promotion when using MemorySSA, in order to save
// compile time.
cl::opt<unsigned> llvm::SetLicmMssaNoAccForPromotionCap(
    "licm-mssa-max-acc-promotion", cl::init(250), cl::Hidden,
    cl::desc("[LICM & MemorySSA] When MSSA in LICM is disabled, this has no "
             "effect. When MSSA in LICM is enabled, then this is the maximum "
             "number of accesses allowed to be present in a loop in order to "
             "enable memory promotion."));
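
// As a hypothetical example, both caps can be tuned when running this pass
// standalone through 'opt':
//
//   opt -licm -licm-mssa-optimization-cap=50 \
//       -licm-mssa-max-acc-promotion=100 -S in.ll -o out.ll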

static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI);
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop);
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE);
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE);
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI = nullptr);
static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AliasAnalysis *AA);
static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags);
static Instruction *CloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU);

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU);

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE);

namespace {
struct LoopInvariantCodeMotion {
  using ASTrackerMapTy = DenseMap<Loop *, std::unique_ptr<AliasSetTracker>>;
  bool runOnLoop(Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
                 TargetLibraryInfo *TLI, TargetTransformInfo *TTI,
                 ScalarEvolution *SE, MemorySSA *MSSA,
                 OptimizationRemarkEmitter *ORE, bool DeleteAST);

  ASTrackerMapTy &getLoopToAliasSetMap() { return LoopToAliasSetMap; }
  LoopInvariantCodeMotion(unsigned LicmMssaOptCap,
                          unsigned LicmMssaNoAccForPromotionCap)
      : LicmMssaOptCap(LicmMssaOptCap),
        LicmMssaNoAccForPromotionCap(LicmMssaNoAccForPromotionCap) {}

private:
  ASTrackerMapTy LoopToAliasSetMap;
  unsigned LicmMssaOptCap;
  unsigned LicmMssaNoAccForPromotionCap;

  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoop(Loop *L, LoopInfo *LI, AliasAnalysis *AA);
  std::unique_ptr<AliasSetTracker>
  collectAliasInfoForLoopWithMSSA(Loop *L, AliasAnalysis *AA,
                                  MemorySSAUpdater *MSSAU);
};

struct LegacyLICMPass : public LoopPass {
  static char ID; // Pass identification, replacement for typeid
  LegacyLICMPass(
      unsigned LicmMssaOptCap = SetLicmMssaOptCap,
      unsigned LicmMssaNoAccForPromotionCap = SetLicmMssaNoAccForPromotionCap)
      : LoopPass(ID), LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap) {
    initializeLegacyLICMPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnLoop(Loop *L, LPPassManager &LPM) override {
    if (skipLoop(L)) {
      // If we have run LICM on a previous loop but now we are skipping
      // (because we've hit the opt-bisect limit), we need to clear the
      // loop alias information.
      LICM.getLoopToAliasSetMap().clear();
      return false;
    }

    auto *SE = getAnalysisIfAvailable<ScalarEvolutionWrapperPass>();
    MemorySSA *MSSA = EnableMSSALoopDependency
                          ? (&getAnalysis<MemorySSAWrapperPass>().getMSSA())
                          : nullptr;
    // For the old PM, we can't use OptimizationRemarkEmitter as an analysis
    // pass.  Function analyses need to be preserved across loop transformations
    // but ORE cannot be preserved (see comment before the pass definition).
    OptimizationRemarkEmitter ORE(L->getHeader()->getParent());
    return LICM.runOnLoop(L,
                          &getAnalysis<AAResultsWrapperPass>().getAAResults(),
                          &getAnalysis<LoopInfoWrapperPass>().getLoopInfo(),
                          &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
                          &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(
                              *L->getHeader()->getParent()),
                          &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(
                              *L->getHeader()->getParent()),
                          SE ? &SE->getSE() : nullptr, MSSA, &ORE, false);
  }

  /// This transformation requires natural loop information and requires that
  /// loop preheaders be inserted into the CFG.
  ///
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    if (EnableMSSALoopDependency) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addRequired<TargetTransformInfoWrapperPass>();
    getLoopAnalysisUsage(AU);
  }

  using llvm::Pass::doFinalization;

  bool doFinalization() override {
    auto &AliasSetMap = LICM.getLoopToAliasSetMap();
    // All loops in the AliasSetMap should be cleaned up already. The only case
    // where we fail to do so is if an outer loop gets deleted before LICM
    // visits it.
    assert(all_of(AliasSetMap,
                  [](LoopInvariantCodeMotion::ASTrackerMapTy::value_type &KV) {
                    return !KV.first->getParentLoop();
                  }) &&
           "Didn't free loop alias sets");
    AliasSetMap.clear();
    return false;
  }

private:
  LoopInvariantCodeMotion LICM;

  /// cloneBasicBlockAnalysis - Simple Analysis hook. Clone alias set info.
  void cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                               Loop *L) override;

  /// deleteAnalysisValue - Simple Analysis hook. Delete value V from alias
  /// set.
  void deleteAnalysisValue(Value *V, Loop *L) override;

  /// Simple Analysis hook. Delete loop L from alias set map.
  void deleteAnalysisLoop(Loop *L) override;
};
} // namespace

PreservedAnalyses LICMPass::run(Loop &L, LoopAnalysisManager &AM,
                                LoopStandardAnalysisResults &AR, LPMUpdater &) {
  const auto &FAM =
      AM.getResult<FunctionAnalysisManagerLoopProxy>(L, AR).getManager();
  Function *F = L.getHeader()->getParent();

  auto *ORE = FAM.getCachedResult<OptimizationRemarkEmitterAnalysis>(*F);
  // FIXME: This should probably be optional rather than required.
  if (!ORE)
    report_fatal_error("LICM: OptimizationRemarkEmitterAnalysis not "
                       "cached at a higher level");

  LoopInvariantCodeMotion LICM(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
  if (!LICM.runOnLoop(&L, &AR.AA, &AR.LI, &AR.DT, &AR.TLI, &AR.TTI, &AR.SE,
                      AR.MSSA, ORE, true))
    return PreservedAnalyses::all();

  auto PA = getLoopPassPreservedAnalyses();

  PA.preserve<DominatorTreeAnalysis>();
  PA.preserve<LoopAnalysis>();
  if (AR.MSSA)
    PA.preserve<MemorySSAAnalysis>();

  return PA;
}

char LegacyLICMPass::ID = 0;
INITIALIZE_PASS_BEGIN(LegacyLICMPass, "licm", "Loop Invariant Code Motion",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(LoopPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(LegacyLICMPass, "licm", "Loop Invariant Code Motion", false,
                    false)

Pass *llvm::createLICMPass() { return new LegacyLICMPass(); }
Pass *llvm::createLICMPass(unsigned LicmMssaOptCap,
                           unsigned LicmMssaNoAccForPromotionCap) {
  return new LegacyLICMPass(LicmMssaOptCap, LicmMssaNoAccForPromotionCap);
}

/// Hoist expressions out of the specified loop. Note that alias info for the
/// inner loop is not preserved, so it is not a good idea to run LICM multiple
/// times on one loop.
/// We should delete the AST for inner loops in the new pass manager to avoid
/// memory leaks.
///
bool LoopInvariantCodeMotion::runOnLoop(
    Loop *L, AliasAnalysis *AA, LoopInfo *LI, DominatorTree *DT,
    TargetLibraryInfo *TLI, TargetTransformInfo *TTI, ScalarEvolution *SE,
    MemorySSA *MSSA, OptimizationRemarkEmitter *ORE, bool DeleteAST) {
  bool Changed = false;

  assert(L->isLCSSAForm(*DT) && "Loop is not in LCSSA form.");

  // If this loop has metadata indicating that LICM is not to be performed then
  // just exit.
  if (hasDisableLICMTransformsHint(L)) {
    return false;
  }

  std::unique_ptr<AliasSetTracker> CurAST;
  std::unique_ptr<MemorySSAUpdater> MSSAU;
  bool NoOfMemAccTooLarge = false;
  unsigned LicmMssaOptCounter = 0;

  if (!MSSA) {
    LLVM_DEBUG(dbgs() << "LICM: Using Alias Set Tracker.\n");
    CurAST = collectAliasInfoForLoop(L, LI, AA);
  } else {
    LLVM_DEBUG(dbgs() << "LICM: Using MemorySSA.\n");
    MSSAU = std::make_unique<MemorySSAUpdater>(MSSA);

    unsigned AccessCapCount = 0;
    for (auto *BB : L->getBlocks()) {
      if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
        for (const auto &MA : *Accesses) {
          (void)MA;
          AccessCapCount++;
          if (AccessCapCount > LicmMssaNoAccForPromotionCap) {
            NoOfMemAccTooLarge = true;
            break;
          }
        }
      }
      if (NoOfMemAccTooLarge)
        break;
    }
  }

  // Get the preheader block to move instructions into...
  BasicBlock *Preheader = L->getLoopPreheader();

  // Compute loop safety information.
  ICFLoopSafetyInfo SafetyInfo(DT);
  SafetyInfo.computeLoopSafetyInfo(L);

  // We want to visit all of the instructions in this loop... that are not part
  // of our subloops (they have already had their invariants hoisted out of
  // their loop, into this loop, so there is no need to process the BODIES of
  // the subloops).
  //
  // Traverse the body of the loop in depth first order on the dominator tree so
  // that we are guaranteed to see definitions before we see uses.  This allows
  // us to sink instructions in one pass, without iteration.  After sinking
  // instructions, we perform another pass to hoist them out of the loop.
  SinkAndHoistLICMFlags Flags = {NoOfMemAccTooLarge, LicmMssaOptCounter,
                                 LicmMssaOptCap, LicmMssaNoAccForPromotionCap,
                                 /*IsSink=*/true};
  if (L->hasDedicatedExits())
    Changed |= sinkRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, TTI, L,
                          CurAST.get(), MSSAU.get(), &SafetyInfo, Flags, ORE);
  Flags.IsSink = false;
  if (Preheader)
    Changed |=
        hoistRegion(DT->getNode(L->getHeader()), AA, LI, DT, TLI, L,
                    CurAST.get(), MSSAU.get(), SE, &SafetyInfo, Flags, ORE);

  // Now that all loop invariants have been removed from the loop, promote any
  // memory references to scalars that we can.
  // Don't sink stores from loops without dedicated block exits. Exits
  // containing indirect branches are not transformed by loop simplify;
  // make sure we catch that. An additional load may be generated in the
  // preheader for the SSA updater, so also avoid sinking when no preheader
  // is available.
  if (!DisablePromotion && Preheader && L->hasDedicatedExits() &&
      !NoOfMemAccTooLarge) {
    // Figure out the loop exits and their insertion points
    SmallVector<BasicBlock *, 8> ExitBlocks;
    L->getUniqueExitBlocks(ExitBlocks);

    // We can't insert into a catchswitch.
    bool HasCatchSwitch = llvm::any_of(ExitBlocks, [](BasicBlock *Exit) {
      return isa<CatchSwitchInst>(Exit->getTerminator());
    });

    if (!HasCatchSwitch) {
      SmallVector<Instruction *, 8> InsertPts;
      SmallVector<MemoryAccess *, 8> MSSAInsertPts;
      InsertPts.reserve(ExitBlocks.size());
      if (MSSAU)
        MSSAInsertPts.reserve(ExitBlocks.size());
      for (BasicBlock *ExitBlock : ExitBlocks) {
        InsertPts.push_back(&*ExitBlock->getFirstInsertionPt());
        if (MSSAU)
          MSSAInsertPts.push_back(nullptr);
      }

      PredIteratorCache PIC;

      bool Promoted = false;

      // Build an AST using MSSA.
      if (!CurAST.get())
        CurAST = collectAliasInfoForLoopWithMSSA(L, AA, MSSAU.get());

      // Loop over all of the alias sets in the tracker object.
      for (AliasSet &AS : *CurAST) {
        // We can promote this alias set if it has a store, if it is a "Must"
        // alias set, if the pointer is loop invariant, and if we are not
        // eliminating any volatile loads or stores.
        if (AS.isForwardingAliasSet() || !AS.isMod() || !AS.isMustAlias() ||
            !L->isLoopInvariant(AS.begin()->getValue()))
          continue;

        assert(
            !AS.empty() &&
            "Must alias set should have at least one pointer element in it!");

        SmallSetVector<Value *, 8> PointerMustAliases;
        for (const auto &ASI : AS)
          PointerMustAliases.insert(ASI.getValue());

        Promoted |= promoteLoopAccessesToScalars(
            PointerMustAliases, ExitBlocks, InsertPts, MSSAInsertPts, PIC, LI,
            DT, TLI, L, CurAST.get(), MSSAU.get(), &SafetyInfo, ORE);
      }

      // Once we have promoted values across the loop body we have to
      // recursively reform LCSSA as any nested loop may now have values defined
      // within the loop used in the outer loop.
      // FIXME: This is really heavy handed. It would be a bit better to use an
      // SSAUpdater strategy during promotion that was LCSSA aware and reformed
      // it as it went.
      if (Promoted)
        formLCSSARecursively(*L, *DT, LI, SE);

      Changed |= Promoted;
    }
  }

  // Check that neither this loop nor its parent have had LCSSA broken. LICM is
  // specifically moving instructions across the loop boundary and so it is
  // especially in need of sanity checking here.
  assert(L->isLCSSAForm(*DT) && "Loop not left in LCSSA form after LICM!");
  assert((!L->getParentLoop() || L->getParentLoop()->isLCSSAForm(*DT)) &&
         "Parent loop not left in LCSSA form after LICM!");

  // If this loop is nested inside of another one, save the alias information
  // for when we process the outer loop.
  if (!MSSAU.get() && CurAST.get() && L->getParentLoop() && !DeleteAST)
    LoopToAliasSetMap[L] = std::move(CurAST);

  if (MSSAU.get() && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  if (Changed && SE)
    SE->forgetLoopDispositions(L);
  return Changed;
}

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in reverse depth
/// first order w.r.t the DominatorTree.  This allows us to visit uses before
/// definitions, allowing us to sink a loop body in one pass without iteration.
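///
/// As a hypothetical illustration, in
///
///   for (i = 0; i < n; i++) { t = a * b; sum += i; }
///   use(t);
///
/// 't' has no users inside the loop, so the multiply can be sunk into the
/// loop's exit block, where it executes once instead of every iteration.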
///
bool llvm::sinkRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                      DominatorTree *DT, TargetLibraryInfo *TLI,
                      TargetTransformInfo *TTI, Loop *CurLoop,
                      AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                      ICFLoopSafetyInfo *SafetyInfo,
                      SinkAndHoistLICMFlags &Flags,
                      OptimizationRemarkEmitter *ORE) {

  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to sinkRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  // We want to visit children before parents. We will enqueue all the parents
  // before their children in the worklist and process the worklist in reverse
  // order.
  SmallVector<DomTreeNode *, 16> Worklist = collectChildrenInLoop(N, CurLoop);

  bool Changed = false;
  for (DomTreeNode *DTN : reverse(Worklist)) {
    BasicBlock *BB = DTN->getBlock();
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->end(); II != BB->begin();) {
      Instruction &I = *--II;

      // If the instruction is dead, we would try to sink it because it isn't
      // used in the loop; instead, just delete it.
      if (isInstructionTriviallyDead(&I, TLI)) {
        LLVM_DEBUG(dbgs() << "LICM deleting dead inst: " << I << '\n');
        salvageDebugInfo(I);
        ++II;
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Check to see if we can sink this instruction to the exit blocks
      // of the loop.  We can do this if all of the users of the instruction are
      // outside of the loop.  In this case, it doesn't even matter if the
      // operands of the instruction are loop invariant.
      //
      bool FreeInLoop = false;
      if (isNotUsedOrFreeInLoop(I, CurLoop, SafetyInfo, TTI, FreeInLoop) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          !I.mayHaveSideEffects()) {
        if (sink(I, LI, DT, CurLoop, SafetyInfo, MSSAU, ORE)) {
          if (!FreeInLoop) {
            ++II;
            eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
          }
          Changed = true;
        }
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  return Changed;
}

namespace {
// This is a helper class for hoistRegion, enabling it to hoist control flow
// so that phis can be hoisted too. The way this works is that we initially
// start hoisting to the loop preheader, and when we see a loop-invariant
// branch we make a note of it. When we then come to hoist an instruction
// that's conditional on such a branch, we duplicate the branch and the
// relevant control flow, then hoist the instruction into the block
// corresponding to its original block in the duplicated control flow.
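//
// As a hypothetical sketch: for a loop-invariant conditional branch
// 'br i1 %c, label %T, label %F' inside the loop, whose arms rejoin at a
// block %M containing a phi of loop-invariant values, we clone %T, %F and %M
// above the loop, hoist the phi into the cloned %M, and make that block the
// new preheader.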
class ControlFlowHoister {
private:
  // Information about the loop we are hoisting from
  LoopInfo *LI;
  DominatorTree *DT;
  Loop *CurLoop;
  MemorySSAUpdater *MSSAU;

  // A map of blocks in the loop to the block their instructions will be hoisted
  // to.
  DenseMap<BasicBlock *, BasicBlock *> HoistDestinationMap;

  // The branches that we can hoist, mapped to the block that marks a
  // convergence point of their control flow.
  DenseMap<BranchInst *, BasicBlock *> HoistableBranches;

public:
  ControlFlowHoister(LoopInfo *LI, DominatorTree *DT, Loop *CurLoop,
                     MemorySSAUpdater *MSSAU)
      : LI(LI), DT(DT), CurLoop(CurLoop), MSSAU(MSSAU) {}

  void registerPossiblyHoistableBranch(BranchInst *BI) {
    // We can only hoist conditional branches with loop invariant operands.
    if (!ControlFlowHoisting || !BI->isConditional() ||
        !CurLoop->hasLoopInvariantOperands(BI))
      return;

    // The branch destinations need to be in the loop, and we don't gain
    // anything by duplicating conditional branches with duplicate successors,
    // as it's essentially the same as an unconditional branch.
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    if (!CurLoop->contains(TrueDest) || !CurLoop->contains(FalseDest) ||
        TrueDest == FalseDest)
      return;

    // We can hoist BI if one branch destination is the successor of the other,
    // or both have a common successor, which we check by seeing if the
    // intersection of their successors is non-empty.
    // TODO: This could be expanded to allowing branches where both ends
    // eventually converge to a single block.
    SmallPtrSet<BasicBlock *, 4> TrueDestSucc, FalseDestSucc;
    TrueDestSucc.insert(succ_begin(TrueDest), succ_end(TrueDest));
    FalseDestSucc.insert(succ_begin(FalseDest), succ_end(FalseDest));
    BasicBlock *CommonSucc = nullptr;
    if (TrueDestSucc.count(FalseDest)) {
      CommonSucc = FalseDest;
    } else if (FalseDestSucc.count(TrueDest)) {
      CommonSucc = TrueDest;
    } else {
      set_intersect(TrueDestSucc, FalseDestSucc);
      // If there's one common successor use that.
      if (TrueDestSucc.size() == 1)
        CommonSucc = *TrueDestSucc.begin();
      // If there's more than one, pick whichever appears first in the block
      // list (we can't use the value returned by TrueDestSucc.begin() as it's
      // unpredictable which element gets returned).
      else if (!TrueDestSucc.empty()) {
        Function *F = TrueDest->getParent();
        auto IsSucc = [&](BasicBlock &BB) { return TrueDestSucc.count(&BB); };
        auto It = std::find_if(F->begin(), F->end(), IsSucc);
        assert(It != F->end() && "Could not find successor in function");
        CommonSucc = &*It;
      }
    }
    // The common successor has to be dominated by the branch, as otherwise
    // there will be some other path to the successor that will not be
    // controlled by this branch so any phi we hoist would be controlled by the
    // wrong condition. This also takes care of avoiding hoisting of loop back
    // edges.
    // TODO: In some cases this could be relaxed if the successor is dominated
    // by another block that's been hoisted and we can guarantee that the
    // control flow has been replicated exactly.
    if (CommonSucc && DT->dominates(BI, CommonSucc))
      HoistableBranches[BI] = CommonSucc;
  }

  bool canHoistPHI(PHINode *PN) {
    // The phi must have loop invariant operands.
    if (!ControlFlowHoisting || !CurLoop->hasLoopInvariantOperands(PN))
      return false;
    // We can hoist phis if the block they are in is the target of hoistable
    // branches which cover all of the predecessors of the block.
    SmallPtrSet<BasicBlock *, 8> PredecessorBlocks;
    BasicBlock *BB = PN->getParent();
    for (BasicBlock *PredBB : predecessors(BB))
      PredecessorBlocks.insert(PredBB);
    // If we have fewer predecessor blocks than predecessors then the phi will
    // have more than one incoming value for the same block, which we can't
    // handle.
    // TODO: This could be handled by erasing some of the duplicate incoming
    // values.
    if (PredecessorBlocks.size() != pred_size(BB))
      return false;
    for (auto &Pair : HoistableBranches) {
      if (Pair.second == BB) {
        // Which blocks are predecessors via this branch depends on whether the
        // branch is triangle-like or diamond-like.
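        // (Illustration: in the triangle case one successor of the branch is
        // BB itself and the other falls through to BB; in the diamond case
        // both successors are distinct blocks that each branch to BB.)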
        if (Pair.first->getSuccessor(0) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        } else if (Pair.first->getSuccessor(1) == BB) {
          PredecessorBlocks.erase(Pair.first->getParent());
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
        } else {
          PredecessorBlocks.erase(Pair.first->getSuccessor(0));
          PredecessorBlocks.erase(Pair.first->getSuccessor(1));
        }
      }
    }
    // PredecessorBlocks will now be empty if for every predecessor of BB we
    // found a hoistable branch source.
    return PredecessorBlocks.empty();
  }

  BasicBlock *getOrCreateHoistedBlock(BasicBlock *BB) {
    if (!ControlFlowHoisting)
      return CurLoop->getLoopPreheader();
    // If BB has already been hoisted, return that
    if (HoistDestinationMap.count(BB))
      return HoistDestinationMap[BB];

    // Check if this block is conditional based on a pending branch
    auto HasBBAsSuccessor =
        [&](DenseMap<BranchInst *, BasicBlock *>::value_type &Pair) {
          return BB != Pair.second && (Pair.first->getSuccessor(0) == BB ||
                                       Pair.first->getSuccessor(1) == BB);
        };
    auto It = std::find_if(HoistableBranches.begin(), HoistableBranches.end(),
                           HasBBAsSuccessor);

    // If not involved in a pending branch, hoist to preheader
    BasicBlock *InitialPreheader = CurLoop->getLoopPreheader();
    if (It == HoistableBranches.end()) {
      LLVM_DEBUG(dbgs() << "LICM using " << InitialPreheader->getName()
                        << " as hoist destination for " << BB->getName()
                        << "\n");
      HoistDestinationMap[BB] = InitialPreheader;
      return InitialPreheader;
    }
    BranchInst *BI = It->first;
    assert(std::find_if(++It, HoistableBranches.end(), HasBBAsSuccessor) ==
               HoistableBranches.end() &&
           "BB is expected to be the target of at most one branch");

    LLVMContext &C = BB->getContext();
    BasicBlock *TrueDest = BI->getSuccessor(0);
    BasicBlock *FalseDest = BI->getSuccessor(1);
    BasicBlock *CommonSucc = HoistableBranches[BI];
    BasicBlock *HoistTarget = getOrCreateHoistedBlock(BI->getParent());

    // Create hoisted versions of blocks that currently don't have them
    auto CreateHoistedBlock = [&](BasicBlock *Orig) {
      if (HoistDestinationMap.count(Orig))
        return HoistDestinationMap[Orig];
      BasicBlock *New =
          BasicBlock::Create(C, Orig->getName() + ".licm", Orig->getParent());
      HoistDestinationMap[Orig] = New;
      DT->addNewBlock(New, HoistTarget);
      if (CurLoop->getParentLoop())
        CurLoop->getParentLoop()->addBasicBlockToLoop(New, *LI);
      ++NumCreatedBlocks;
      LLVM_DEBUG(dbgs() << "LICM created " << New->getName()
                        << " as hoist destination for " << Orig->getName()
                        << "\n");
      return New;
    };
    BasicBlock *HoistTrueDest = CreateHoistedBlock(TrueDest);
    BasicBlock *HoistFalseDest = CreateHoistedBlock(FalseDest);
    BasicBlock *HoistCommonSucc = CreateHoistedBlock(CommonSucc);

    // Link up these blocks with branches.
    if (!HoistCommonSucc->getTerminator()) {
      // The new common successor we've generated will branch to whatever that
      // hoist target branched to.
      BasicBlock *TargetSucc = HoistTarget->getSingleSuccessor();
      assert(TargetSucc && "Expected hoist target to have a single successor");
      HoistCommonSucc->moveBefore(TargetSucc);
      BranchInst::Create(TargetSucc, HoistCommonSucc);
    }
    if (!HoistTrueDest->getTerminator()) {
      HoistTrueDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistTrueDest);
    }
    if (!HoistFalseDest->getTerminator()) {
      HoistFalseDest->moveBefore(HoistCommonSucc);
      BranchInst::Create(HoistCommonSucc, HoistFalseDest);
    }

    // If BI is being cloned to what was originally the preheader then
    // HoistCommonSucc will now be the new preheader.
    if (HoistTarget == InitialPreheader) {
      // Phis in the loop header now need to use the new preheader.
      InitialPreheader->replaceSuccessorsPhiUsesWith(HoistCommonSucc);
      if (MSSAU)
        MSSAU->wireOldPredecessorsToNewImmediatePredecessor(
            HoistTarget->getSingleSuccessor(), HoistCommonSucc, {HoistTarget});
      // The new preheader dominates the loop header.
      DomTreeNode *PreheaderNode = DT->getNode(HoistCommonSucc);
      DomTreeNode *HeaderNode = DT->getNode(CurLoop->getHeader());
      DT->changeImmediateDominator(HeaderNode, PreheaderNode);
      // The preheader hoist destination is now the new preheader, with the
      // exception of the hoist destination of this branch.
      for (auto &Pair : HoistDestinationMap)
        if (Pair.second == InitialPreheader && Pair.first != BI->getParent())
          Pair.second = HoistCommonSucc;
    }

    // Now finally clone BI.
    ReplaceInstWithInst(
        HoistTarget->getTerminator(),
        BranchInst::Create(HoistTrueDest, HoistFalseDest, BI->getCondition()));
    ++NumClonedBranches;

    assert(CurLoop->getLoopPreheader() &&
           "Hoisting blocks should not have destroyed preheader");
    return HoistDestinationMap[BB];
  }
};
} // namespace

/// Walk the specified region of the CFG (defined by all blocks dominated by
/// the specified block, and that are in the current loop) in depth first
/// order w.r.t the DominatorTree.  This allows us to visit definitions before
/// uses, allowing us to hoist a loop body in one pass without iteration.
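///
/// As a hypothetical illustration, 'x = a + b' computed on every iteration
/// from loop-invariant 'a' and 'b' is moved once into the preheader.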
///
bool llvm::hoistRegion(DomTreeNode *N, AliasAnalysis *AA, LoopInfo *LI,
                       DominatorTree *DT, TargetLibraryInfo *TLI, Loop *CurLoop,
                       AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
                       ScalarEvolution *SE, ICFLoopSafetyInfo *SafetyInfo,
                       SinkAndHoistLICMFlags &Flags,
                       OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(N != nullptr && AA != nullptr && LI != nullptr && DT != nullptr &&
         CurLoop != nullptr && SafetyInfo != nullptr &&
         "Unexpected input to hoistRegion.");
  assert(((CurAST != nullptr) ^ (MSSAU != nullptr)) &&
         "Either AliasSetTracker or MemorySSA should be initialized.");

  ControlFlowHoister CFH(LI, DT, CurLoop, MSSAU);

  // Keep track of instructions that have been hoisted, as they may need to be
  // re-hoisted if they end up not dominating all of their uses.
  SmallVector<Instruction *, 16> HoistedInstructions;

  // For PHI hoisting to work we need to hoist blocks before their successors.
  // We can do this by iterating through the blocks in the loop in reverse
  // post-order.
  LoopBlocksRPO Worklist(CurLoop);
  Worklist.perform(LI);
  bool Changed = false;
  for (BasicBlock *BB : Worklist) {
    // Only need to process the contents of this block if it is not part of a
    // subloop (which would already have been processed).
    if (inSubLoop(BB, CurLoop, LI))
      continue;

    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;) {
      Instruction &I = *II++;
      // Try constant folding this instruction.  If all the operands are
      // constants, it is technically hoistable, but it would be better to
      // just fold it.
      if (Constant *C = ConstantFoldInstruction(
              &I, I.getModule()->getDataLayout(), TLI)) {
        LLVM_DEBUG(dbgs() << "LICM folding inst: " << I << "  --> " << *C
                          << '\n');
        if (CurAST)
          CurAST->copyValue(&I, C);
        // FIXME MSSA: Such replacements may make accesses unoptimized (D51960).
        I.replaceAllUsesWith(C);
        if (isInstructionTriviallyDead(&I, TLI))
          eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);
        Changed = true;
        continue;
      }

      // Try hoisting the instruction out to the preheader.  We can only do
      // this if all of the operands of the instruction are loop invariant and
      // if it is safe to hoist the instruction.
      // TODO: It may be safe to hoist if we are hoisting to a conditional block
      // and we have accurately duplicated the control flow from the loop header
      // to that block.
      if (CurLoop->hasLoopInvariantOperands(&I) &&
          canSinkOrHoistInst(I, AA, DT, CurLoop, CurAST, MSSAU, true, &Flags,
                             ORE) &&
          isSafeToExecuteUnconditionally(
              I, DT, CurLoop, SafetyInfo, ORE,
              CurLoop->getLoopPreheader()->getTerminator())) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      // Attempt to remove floating point division out of the loop by
      // converting it to a reciprocal multiplication.
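      // As an illustrative sketch, with reciprocal fast-math allowed:
      //   %q = fdiv fast double %x, %d     ; %d loop-invariant
      // becomes
      //   %r = fdiv fast double 1.0, %d    ; hoisted to the preheader
      //   %q = fmul fast double %x, %r     ; remains in the loop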
      if (I.getOpcode() == Instruction::FDiv &&
          CurLoop->isLoopInvariant(I.getOperand(1)) &&
          I.hasAllowReciprocal()) {
        auto Divisor = I.getOperand(1);
        auto One = llvm::ConstantFP::get(Divisor->getType(), 1.0);
        auto ReciprocalDivisor = BinaryOperator::CreateFDiv(One, Divisor);
        ReciprocalDivisor->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(ReciprocalDivisor, I.getParent());
        ReciprocalDivisor->insertBefore(&I);

        auto Product =
            BinaryOperator::CreateFMul(I.getOperand(0), ReciprocalDivisor);
        Product->setFastMathFlags(I.getFastMathFlags());
        SafetyInfo->insertInstructionTo(Product, I.getParent());
        Product->insertAfter(&I);
        I.replaceAllUsesWith(Product);
        eraseInstruction(I, *SafetyInfo, CurAST, MSSAU);

        hoist(*ReciprocalDivisor, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB),
              SafetyInfo, MSSAU, SE, ORE);
        HoistedInstructions.push_back(ReciprocalDivisor);
        Changed = true;
        continue;
      }

      auto IsInvariantStart = [&](Instruction &I) {
        using namespace PatternMatch;
        return I.use_empty() &&
               match(&I, m_Intrinsic<Intrinsic::invariant_start>());
      };
      auto MustExecuteWithoutWritesBefore = [&](Instruction &I) {
        return SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop) &&
               SafetyInfo->doesNotWriteMemoryBefore(I, CurLoop);
      };
      if ((IsInvariantStart(I) || isGuard(&I)) &&
          CurLoop->hasLoopInvariantOperands(&I) &&
          MustExecuteWithoutWritesBefore(I)) {
        hoist(I, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
              MSSAU, SE, ORE);
        HoistedInstructions.push_back(&I);
        Changed = true;
        continue;
      }

      if (PHINode *PN = dyn_cast<PHINode>(&I)) {
        if (CFH.canHoistPHI(PN)) {
          // Redirect incoming blocks first to ensure that we create hoisted
          // versions of those blocks before we hoist the phi.
          for (unsigned int i = 0; i < PN->getNumIncomingValues(); ++i)
            PN->setIncomingBlock(
                i, CFH.getOrCreateHoistedBlock(PN->getIncomingBlock(i)));
          hoist(*PN, DT, CurLoop, CFH.getOrCreateHoistedBlock(BB), SafetyInfo,
                MSSAU, SE, ORE);
          assert(DT->dominates(PN, BB) && "Conditional PHIs not expected");
          Changed = true;
          continue;
        }
      }

      // Remember possibly hoistable branches so we can actually hoist them
      // later if needed.
      if (BranchInst *BI = dyn_cast<BranchInst>(&I))
        CFH.registerPossiblyHoistableBranch(BI);
    }
  }

  // If we hoisted instructions to a conditional block they may not dominate
  // their uses that weren't hoisted (such as phis where some operands are not
  // loop invariant). If so make them unconditional by moving them to their
  // immediate dominator. We iterate through the instructions in reverse order
  // which ensures that when we rehoist an instruction we rehoist its operands,
  // and also keep track of where in the block we are rehoisting to, to make
  // sure that we rehoist instructions before the instructions that use them.
  Instruction *HoistPoint = nullptr;
  if (ControlFlowHoisting) {
    for (Instruction *I : reverse(HoistedInstructions)) {
      if (!llvm::all_of(I->uses(),
                        [&](Use &U) { return DT->dominates(I, U); })) {
        BasicBlock *Dominator =
            DT->getNode(I->getParent())->getIDom()->getBlock();
        if (!HoistPoint || !DT->dominates(HoistPoint->getParent(), Dominator)) {
          if (HoistPoint)
            assert(DT->dominates(Dominator, HoistPoint->getParent()) &&
                   "New hoist point expected to dominate old hoist point");
          HoistPoint = Dominator->getTerminator();
        }
        LLVM_DEBUG(dbgs() << "LICM rehoisting to "
                          << HoistPoint->getParent()->getName()
                          << ": " << *I << "\n");
        moveInstructionBefore(*I, *HoistPoint, *SafetyInfo, MSSAU, SE);
        HoistPoint = I;
        Changed = true;
      }
    }
  }
  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();

  // Now that we've finished hoisting make sure that LI and DT are still
  // valid.
#ifdef EXPENSIVE_CHECKS
  if (Changed) {
    assert(DT->verify(DominatorTree::VerificationLevel::Fast) &&
           "Dominator tree verification failed");
    LI->verify(*DT);
  }
#endif

  return Changed;
}

// Return true if LI is invariant within scope of the loop. LI is invariant if
// CurLoop is dominated by an invariant.start representing the same memory
// location and size as the memory location LI loads from, and also the
// invariant.start has no uses.
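//
// For example (hypothetical IR), a load of an i32 through %a can be treated
// as invariant when a dominating call such as
//   %i = call {}* @llvm.invariant.start.p0i8(i64 4, i8* %a)
// covers at least the loaded size and %i itself has no uses.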
static bool isLoadInvariantInLoop(LoadInst *LI, DominatorTree *DT,
                                  Loop *CurLoop) {
  Value *Addr = LI->getOperand(0);
  const DataLayout &DL = LI->getModule()->getDataLayout();
  const uint32_t LocSizeInBits = DL.getTypeSizeInBits(LI->getType());

  // If the type is i8 addrspace(x)*, we know this is the type of the
  // llvm.invariant.start operand.
  auto *PtrInt8Ty = PointerType::get(Type::getInt8Ty(LI->getContext()),
                                     LI->getPointerAddressSpace());
  unsigned BitcastsVisited = 0;
  // Look through bitcasts until we reach the i8* type (this is the
  // invariant.start operand type).
  while (Addr->getType() != PtrInt8Ty) {
    auto *BC = dyn_cast<BitCastInst>(Addr);
    // Avoid traversing high number of bitcast uses.
    if (++BitcastsVisited > MaxNumUsesTraversed || !BC)
      return false;
    Addr = BC->getOperand(0);
  }

  unsigned UsesVisited = 0;
  // Traverse all uses of the load operand value, to see if invariant.start is
  // one of the uses, and whether it dominates the load instruction.
  for (auto *U : Addr->users()) {
    // Avoid traversing for Load operand with high number of users.
    if (++UsesVisited > MaxNumUsesTraversed)
      return false;
    IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
    // If there are escaping uses of the invariant.start instruction, the load
    // may be non-invariant.
    if (!II || II->getIntrinsicID() != Intrinsic::invariant_start ||
        !II->use_empty())
      continue;
    unsigned InvariantSizeInBits =
        cast<ConstantInt>(II->getArgOperand(0))->getSExtValue() * 8;
    // Confirm the invariant.start location size contains the load operand size
    // in bits. Also, the invariant.start should dominate the load, and we
    // should not hoist the load out of a loop that contains this dominating
    // invariant.start.
    if (LocSizeInBits <= InvariantSizeInBits &&
        DT->properlyDominates(II->getParent(), CurLoop->getHeader()))
      return true;
  }

  return false;
}

namespace {
/// Return true if-and-only-if we know how to (mechanically) both hoist and
/// sink a given instruction out of a loop.  Does not address legality
/// concerns such as aliasing or speculation safety.
bool isHoistableAndSinkableInst(Instruction &I) {
  // Only these instructions are hoistable/sinkable.
  return (isa<LoadInst>(I) || isa<StoreInst>(I) || isa<CallInst>(I) ||
          isa<FenceInst>(I) || isa<CastInst>(I) ||
          isa<UnaryOperator>(I) || isa<BinaryOperator>(I) ||
          isa<SelectInst>(I) || isa<GetElementPtrInst>(I) || isa<CmpInst>(I) ||
          isa<InsertElementInst>(I) || isa<ExtractElementInst>(I) ||
          isa<ShuffleVectorInst>(I) || isa<ExtractValueInst>(I) ||
          isa<InsertValueInst>(I));
}
/// Return true if all of the alias sets within this AST are known not to
/// contain a Mod, or if MSSA knows there are no MemoryDefs in the loop.
bool isReadOnly(AliasSetTracker *CurAST, const MemorySSAUpdater *MSSAU,
                const Loop *L) {
  if (CurAST) {
    for (AliasSet &AS : *CurAST) {
      if (!AS.isForwardingAliasSet() && AS.isMod()) {
        return false;
      }
    }
    return true;
  } else { /*MSSAU*/
    for (auto *BB : L->getBlocks())
      if (MSSAU->getMemorySSA()->getBlockDefs(BB))
        return false;
    return true;
  }
}

/// Return true if I is the only Instruction with a MemoryAccess in L.
bool isOnlyMemoryAccess(const Instruction *I, const Loop *L,
                        const MemorySSAUpdater *MSSAU) {
  for (auto *BB : L->getBlocks())
    if (auto *Accs = MSSAU->getMemorySSA()->getBlockAccesses(BB)) {
      int NotAPhi = 0;
      for (const auto &Acc : *Accs) {
        if (isa<MemoryPhi>(&Acc))
          continue;
        const auto *MUD = cast<MemoryUseOrDef>(&Acc);
        if (MUD->getMemoryInst() != I || NotAPhi++ == 1)
          return false;
      }
    }
  return true;
}
} // namespace

bool llvm::canSinkOrHoistInst(Instruction &I, AAResults *AA, DominatorTree *DT,
                              Loop *CurLoop, AliasSetTracker *CurAST,
                              MemorySSAUpdater *MSSAU,
                              bool TargetExecutesOncePerLoop,
                              SinkAndHoistLICMFlags *Flags,
                              OptimizationRemarkEmitter *ORE) {
  // If we don't understand the instruction, bail early.
  if (!isHoistableAndSinkableInst(I))
    return false;

  MemorySSA *MSSA = MSSAU ? MSSAU->getMemorySSA() : nullptr;
  if (MSSA)
    assert(Flags != nullptr && "Flags cannot be null.");

  // Loads have extra constraints we have to verify before we can hoist them.
  if (LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    if (!LI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic loads!

    // Loads from constant memory are always safe to move, even if they end up
    // in the same alias set as something that ends up being modified.
    if (AA->pointsToConstantMemory(LI->getOperand(0)))
      return true;
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

    if (LI->isAtomic() && !TargetExecutesOncePerLoop)
      return false; // Don't risk duplicating unordered atomic loads.

    // This checks for an invariant.start dominating the load.
    if (isLoadInvariantInLoop(LI, DT, CurLoop))
      return true;

    bool Invalidated;
    if (CurAST)
      Invalidated = pointerInvalidatedByLoop(MemoryLocation::get(LI), CurAST,
                                             CurLoop, AA);
    else
      Invalidated = pointerInvalidatedByLoopWithMSSA(
          MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(LI)), CurLoop, *Flags);
    // Check loop-invariant address because this may also be a sinkable load
    // whose address is not necessarily loop-invariant.
    if (ORE && Invalidated && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressInvalidated", LI)
               << "failed to move load with loop-invariant address "
                  "because the loop may invalidate its value";
      });

    return !Invalidated;
  } else if (CallInst *CI = dyn_cast<CallInst>(&I)) {
    // Don't sink or hoist dbg info; it's legal, but not useful.
    if (isa<DbgInfoIntrinsic>(I))
      return false;

    // Don't sink calls which can throw.
    if (CI->mayThrow())
      return false;

    using namespace PatternMatch;
    if (match(CI, m_Intrinsic<Intrinsic::assume>()))
      // Assumes don't actually alias anything or throw
      return true;

    if (match(CI, m_Intrinsic<Intrinsic::experimental_widenable_condition>()))
      // Widenable conditions don't actually alias anything or throw
      return true;

    // Handle simple cases by querying alias analysis.
    FunctionModRefBehavior Behavior = AA->getModRefBehavior(CI);
    if (Behavior == FMRB_DoesNotAccessMemory)
      return true;
    if (AliasAnalysis::onlyReadsMemory(Behavior)) {
      // A readonly argmemonly function only reads from memory pointed to by
      // its arguments with arbitrary offsets.  If we can prove there are no
      // writes to this memory in the loop, we can hoist or sink.
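      // For example, a call to a libc routine like strlen, which is typically
      // inferred readonly and argmemonly, only reads through its pointer
      // argument; it can be moved whenever nothing in the loop writes to that
      // memory.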
      if (AliasAnalysis::onlyAccessesArgPointees(Behavior)) {
        // TODO: expand to writeable arguments
        for (Value *Op : CI->arg_operands())
          if (Op->getType()->isPointerTy()) {
            bool Invalidated;
            if (CurAST)
              Invalidated = pointerInvalidatedByLoop(
                  MemoryLocation(Op, LocationSize::unknown(), AAMDNodes()),
                  CurAST, CurLoop, AA);
            else
              Invalidated = pointerInvalidatedByLoopWithMSSA(
                  MSSA, cast<MemoryUse>(MSSA->getMemoryAccess(CI)), CurLoop,
                  *Flags);
            if (Invalidated)
              return false;
          }
        return true;
      }

      // If this call only reads from memory and there are no writes to memory
      // in the loop, we can hoist or sink the call as appropriate.
      if (isReadOnly(CurAST, MSSAU, CurLoop))
        return true;
    }

    // FIXME: This should use mod/ref information to see if we can hoist or
    // sink the call.

    return false;
  } else if (auto *FI = dyn_cast<FenceInst>(&I)) {
    // Fences alias (most) everything to provide ordering.  For the moment,
    // just give up if there are any other memory operations in the loop.
    if (CurAST) {
      auto Begin = CurAST->begin();
      assert(Begin != CurAST->end() && "must contain FI");
      if (std::next(Begin) != CurAST->end())
        // constant memory for instance, TODO: handle better
        return false;
      auto *UniqueI = Begin->getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      (void)FI; // suppress unused variable warning
      assert(UniqueI == FI && "AS must contain FI");
      return true;
    } else // MSSAU
      return isOnlyMemoryAccess(FI, CurLoop, MSSAU);
  } else if (auto *SI = dyn_cast<StoreInst>(&I)) {
    if (!SI->isUnordered())
      return false; // Don't sink/hoist volatile or ordered atomic store!

    // We can only hoist a store that we can prove writes a value which is not
    // read or overwritten within the loop.  For those cases, we fall back to
    // load/store promotion instead.  TODO: We can extend this to cases where
    // there is exactly one write to the location and that write dominates an
    // arbitrary number of reads in the loop.
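    // For example, in:
    //   for (...) { *p = 42; }
    // the store is movable when it is the only access to *p anywhere in the
    // loop, which is what the checks below establish.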
    if (CurAST) {
      auto &AS = CurAST->getAliasSetFor(MemoryLocation::get(SI));

      if (AS.isRef() || !AS.isMustAlias())
        // Quick exit test, handled by the full path below as well.
        return false;
      auto *UniqueI = AS.getUniqueInstruction();
      if (!UniqueI)
        // other memory op, give up
        return false;
      assert(UniqueI == SI && "AS must contain SI");
      return true;
    } else { // MSSAU
      if (isOnlyMemoryAccess(SI, CurLoop, MSSAU))
        return true;
      // If there are more accesses than the Promotion cap, give up, we're not
      // walking a list that long.
      if (Flags->NoOfMemAccTooLarge)
        return false;
      // Check store only if there's still "quota" to check clobber.
      if (Flags->LicmMssaOptCounter >= Flags->LicmMssaOptCap)
        return false;
      // If there are interfering Uses (i.e. their defining access is in the
      // loop), or ordered loads (stored as Defs!), don't move this store.
      // Could do better here, but this is conservatively correct.
      // TODO: Cache set of Uses on the first walk in runOnLoop, update when
      // moving accesses. Can also extend to dominating uses.
      auto *SIMD = MSSA->getMemoryAccess(SI);
      for (auto *BB : CurLoop->getBlocks())
        if (auto *Accesses = MSSA->getBlockAccesses(BB)) {
          for (const auto &MA : *Accesses)
            if (const auto *MU = dyn_cast<MemoryUse>(&MA)) {
              auto *MD = MU->getDefiningAccess();
              if (!MSSA->isLiveOnEntryDef(MD) &&
                  CurLoop->contains(MD->getBlock()))
                return false;
              // Disable hoisting past potentially interfering loads. Optimized
              // Uses may point to an access outside the loop, as getClobbering
              // checks the previous iteration when walking the backedge.
              // FIXME: More precise: no Uses that alias SI.
              if (!Flags->IsSink && !MSSA->dominates(SIMD, MU))
                return false;
            } else if (const auto *MD = dyn_cast<MemoryDef>(&MA)) {
              if (auto *LI = dyn_cast<LoadInst>(MD->getMemoryInst())) {
                (void)LI; // Silence warning.
                assert(!LI->isUnordered() && "Expected ordered load");
                return false;
              }
              // Any call, even if it does not clobber SI, may still be a use.
              if (auto *CI = dyn_cast<CallInst>(MD->getMemoryInst())) {
                // Check if the call may read from the memory location written
                // to by SI. Check CI's attributes and arguments; the number of
                // such checks performed is limited above by NoOfMemAccTooLarge.
                ModRefInfo MRI = AA->getModRefInfo(CI, MemoryLocation::get(SI));
                if (isModOrRefSet(MRI))
                  return false;
              }
            }
        }

      auto *Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(SI);
      Flags->LicmMssaOptCounter++;
      // If there are no clobbering Defs in the loop, store is safe to hoist.
      return MSSA->isLiveOnEntryDef(Source) ||
             !CurLoop->contains(Source->getBlock());
    }
  }

  assert(!I.mayReadOrWriteMemory() && "unhandled aliasing");

  // We've established mechanical ability and aliasing; it's up to the caller
  // to check fault safety.
  return true;
}

/// Returns true if a PHINode is trivially replaceable with an
/// Instruction.
/// This is true when all incoming values are that instruction.
/// This pattern occurs most often with LCSSA PHI nodes.
///
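/// For example, the LCSSA PHI
///   %p = phi [ %inst, %bb1 ], [ %inst, %bb2 ]
/// is trivially replaceable with %inst.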
static bool isTriviallyReplaceablePHI(const PHINode &PN, const Instruction &I) {
  for (const Value *IncValue : PN.incoming_values())
    if (IncValue != &I)
      return false;

  return true;
}

/// Return true if the instruction is free in the loop.
static bool isFreeInLoop(const Instruction &I, const Loop *CurLoop,
                         const TargetTransformInfo *TTI) {

  if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(&I)) {
    if (TTI->getUserCost(GEP) != TargetTransformInfo::TCC_Free)
      return false;
    // For a GEP, we cannot simply use getUserCost because currently it
    // optimistically assumes that a GEP will fold into addressing mode
    // regardless of its users.
    const BasicBlock *BB = GEP->getParent();
    for (const User *U : GEP->users()) {
      const Instruction *UI = cast<Instruction>(U);
      if (CurLoop->contains(UI) &&
          (BB != UI->getParent() ||
           (!isa<StoreInst>(UI) && !isa<LoadInst>(UI))))
        return false;
    }
    return true;
  } else
    return TTI->getUserCost(&I) == TargetTransformInfo::TCC_Free;
}

/// Return true if the only users of this instruction are outside of
/// the loop. If this is true, we can sink the instruction to the exit
/// blocks of the loop.
///
/// We also return true if the instruction could be folded away in lowering.
/// (e.g., a GEP can be folded into a load as an addressing mode in the loop).
static bool isNotUsedOrFreeInLoop(const Instruction &I, const Loop *CurLoop,
                                  const LoopSafetyInfo *SafetyInfo,
                                  TargetTransformInfo *TTI, bool &FreeInLoop) {
  const auto &BlockColors = SafetyInfo->getBlockColors();
  bool IsFree = isFreeInLoop(I, CurLoop, TTI);
  for (const User *U : I.users()) {
    const Instruction *UI = cast<Instruction>(U);
    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      const BasicBlock *BB = PN->getParent();
      // We cannot sink uses in catchswitches.
      if (isa<CatchSwitchInst>(BB->getTerminator()))
        return false;

      // We need to sink a callsite to a unique funclet.  Avoid sinking if the
      // phi use is too muddled.
      if (isa<CallInst>(I))
        if (!BlockColors.empty() &&
            BlockColors.find(const_cast<BasicBlock *>(BB))->second.size() != 1)
          return false;
    }

    if (CurLoop->contains(UI)) {
      if (IsFree) {
        FreeInLoop = true;
        continue;
      }
      return false;
    }
  }
  return true;
}

static Instruction *CloneInstructionInExitBlock(
    Instruction &I, BasicBlock &ExitBlock, PHINode &PN, const LoopInfo *LI,
    const LoopSafetyInfo *SafetyInfo, MemorySSAUpdater *MSSAU) {
  Instruction *New;
  if (auto *CI = dyn_cast<CallInst>(&I)) {
    const auto &BlockColors = SafetyInfo->getBlockColors();

    // Sinking call-sites need to be handled differently from other
    // instructions.  The cloned call-site needs a funclet bundle operand
    // appropriate for its location in the CFG.
    SmallVector<OperandBundleDef, 1> OpBundles;
    for (unsigned BundleIdx = 0, BundleEnd = CI->getNumOperandBundles();
         BundleIdx != BundleEnd; ++BundleIdx) {
      OperandBundleUse Bundle = CI->getOperandBundleAt(BundleIdx);
      if (Bundle.getTagID() == LLVMContext::OB_funclet)
        continue;

      OpBundles.emplace_back(Bundle);
    }

    if (!BlockColors.empty()) {
      const ColorVector &CV = BlockColors.find(&ExitBlock)->second;
      assert(CV.size() == 1 && "non-unique color for exit block!");
      BasicBlock *BBColor = CV.front();
      Instruction *EHPad = BBColor->getFirstNonPHI();
      if (EHPad->isEHPad())
        OpBundles.emplace_back("funclet", EHPad);
    }

    New = CallInst::Create(CI, OpBundles);
  } else {
    New = I.clone();
  }

  ExitBlock.getInstList().insert(ExitBlock.getFirstInsertionPt(), New);
  if (!I.getName().empty())
    New->setName(I.getName() + ".le");

  if (MSSAU && MSSAU->getMemorySSA()->getMemoryAccess(&I)) {
    // Create a new MemoryAccess and let MemorySSA set its defining access.
    MemoryAccess *NewMemAcc = MSSAU->createMemoryAccessInBB(
        New, nullptr, New->getParent(), MemorySSA::Beginning);
    if (NewMemAcc) {
      if (auto *MemDef = dyn_cast<MemoryDef>(NewMemAcc))
        MSSAU->insertDef(MemDef, /*RenameUses=*/true);
      else {
        auto *MemUse = cast<MemoryUse>(NewMemAcc);
        MSSAU->insertUse(MemUse, /*RenameUses=*/true);
      }
    }
  }

  // Build LCSSA PHI nodes for any in-loop operands. Note that this is
  // particularly cheap because we can rip off the PHI node that we're
  // replacing for the number and blocks of the predecessors.
  // OPT: If this shows up in a profile, we can instead finish sinking all
  // invariant instructions, and then walk their operands to re-establish
  // LCSSA. That will eliminate creating PHI nodes just to nuke them when
  // sinking bottom-up.
  for (User::op_iterator OI = New->op_begin(), OE = New->op_end(); OI != OE;
       ++OI)
    if (Instruction *OInst = dyn_cast<Instruction>(*OI))
      if (Loop *OLoop = LI->getLoopFor(OInst->getParent()))
        if (!OLoop->contains(&PN)) {
          PHINode *OpPN =
              PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
                              OInst->getName() + ".lcssa", &ExitBlock.front());
          for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
            OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
          *OI = OpPN;
        }
  return New;
}

static void eraseInstruction(Instruction &I, ICFLoopSafetyInfo &SafetyInfo,
                             AliasSetTracker *AST, MemorySSAUpdater *MSSAU) {
  if (AST)
    AST->deleteValue(&I);
  if (MSSAU)
    MSSAU->removeMemoryAccess(&I);
  SafetyInfo.removeInstruction(&I);
  I.eraseFromParent();
}

static void moveInstructionBefore(Instruction &I, Instruction &Dest,
                                  ICFLoopSafetyInfo &SafetyInfo,
                                  MemorySSAUpdater *MSSAU,
                                  ScalarEvolution *SE) {
  SafetyInfo.removeInstruction(&I);
  SafetyInfo.insertInstructionTo(&I, Dest.getParent());
  I.moveBefore(&Dest);
  if (MSSAU)
    if (MemoryUseOrDef *OldMemAcc = cast_or_null<MemoryUseOrDef>(
            MSSAU->getMemorySSA()->getMemoryAccess(&I)))
      MSSAU->moveToPlace(OldMemAcc, Dest.getParent(),
                         MemorySSA::BeforeTerminator);
  if (SE)
    SE->forgetValue(&I);
}

static Instruction *sinkThroughTriviallyReplaceablePHI(
    PHINode *TPN, Instruction *I, LoopInfo *LI,
    SmallDenseMap<BasicBlock *, Instruction *, 32> &SunkCopies,
    const LoopSafetyInfo *SafetyInfo, const Loop *CurLoop,
    MemorySSAUpdater *MSSAU) {
  assert(isTriviallyReplaceablePHI(*TPN, *I) &&
         "Expect only trivially replaceable PHI");
  BasicBlock *ExitBlock = TPN->getParent();
  Instruction *New;
  auto It = SunkCopies.find(ExitBlock);
  if (It != SunkCopies.end())
    New = It->second;
  else
    New = SunkCopies[ExitBlock] = CloneInstructionInExitBlock(
        *I, *ExitBlock, *TPN, LI, SafetyInfo, MSSAU);
  return New;
}

static bool canSplitPredecessors(PHINode *PN, LoopSafetyInfo *SafetyInfo) {
  BasicBlock *BB = PN->getParent();
  if (!BB->canSplitPredecessors())
    return false;
  // It's not impossible to split EHPad blocks, but if BlockColors already
  // exists it would require updating BlockColors for all offspring blocks
  // accordingly. By skipping this corner case, we can keep the BlockColors
  // update after splitting predecessors fairly simple.
  if (!SafetyInfo->getBlockColors().empty() && BB->getFirstNonPHI()->isEHPad())
    return false;
  for (pred_iterator PI = pred_begin(BB), E = pred_end(BB); PI != E; ++PI) {
    BasicBlock *BBPred = *PI;
    if (isa<IndirectBrInst>(BBPred->getTerminator()) ||
        isa<CallBrInst>(BBPred->getTerminator()))
      return false;
  }
  return true;
}

static void splitPredecessorsOfLoopExit(PHINode *PN, DominatorTree *DT,
                                        LoopInfo *LI, const Loop *CurLoop,
                                        LoopSafetyInfo *SafetyInfo,
                                        MemorySSAUpdater *MSSAU) {
#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif
  BasicBlock *ExitBB = PN->getParent();
  assert(ExitBlockSet.count(ExitBB) && "Expect the PHI is in an exit block.");

  // Split predecessors of the loop exit so that instructions in the loop are
  // exposed to exit blocks through trivially replaceable PHIs while keeping
  // the loop in the canonical form where each predecessor of each exit block
  // is contained within the loop. For example, this will convert the loop
  // below from
  //
  // LB1:
  //   %v1 =
  //   br %LE, %LB2
  // LB2:
  //   %v2 =
  //   br %LE, %LB1
  // LE:
  //   %p = phi [%v1, %LB1], [%v2, %LB2] <-- non-trivially replaceable
  //
  // to
  //
  // LB1:
  //   %v1 =
  //   br %LE.split, %LB2
  // LB2:
  //   %v2 =
  //   br %LE.split2, %LB1
  // LE.split:
  //   %p1 = phi [%v1, %LB1]  <-- trivially replaceable
  //   br %LE
  // LE.split2:
  //   %p2 = phi [%v2, %LB2]  <-- trivially replaceable
  //   br %LE
  // LE:
  //   %p = phi [%p1, %LE.split], [%p2, %LE.split2]
  //
  const auto &BlockColors = SafetyInfo->getBlockColors();
  SmallSetVector<BasicBlock *, 8> PredBBs(pred_begin(ExitBB), pred_end(ExitBB));
  while (!PredBBs.empty()) {
    BasicBlock *PredBB = *PredBBs.begin();
    assert(CurLoop->contains(PredBB) &&
           "Expect all predecessors are in the loop");
    if (PN->getBasicBlockIndex(PredBB) >= 0) {
      BasicBlock *NewPred = SplitBlockPredecessors(
          ExitBB, PredBB, ".split.loop.exit", DT, LI, MSSAU, true);
      // Since we do not allow splitting EH-block with BlockColors in
      // canSplitPredecessors(), we can simply assign predecessor's color to
      // the new block.
      if (!BlockColors.empty())
        // Grab a reference to the ColorVector to be inserted before getting
        // the reference to the vector we are copying because inserting the new
        // element in BlockColors might cause the map to be reallocated.
        SafetyInfo->copyColors(NewPred, PredBB);
    }
    PredBBs.remove(PredBB);
  }
}

/// When an instruction is found to only be used outside of the loop, this
/// function moves it to the exit blocks and patches up SSA form as needed.
/// This method is guaranteed to remove the original instruction from its
/// position, and may either delete it or move it to outside of the loop.
///
static bool sink(Instruction &I, LoopInfo *LI, DominatorTree *DT,
                 const Loop *CurLoop, ICFLoopSafetyInfo *SafetyInfo,
                 MemorySSAUpdater *MSSAU, OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM sinking instruction: " << I << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "InstSunk", &I)
           << "sinking " << ore::NV("Inst", &I);
  });
  bool Changed = false;
  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumSunk;

  // Iterate over users to be ready for actual sinking. Replace users in
  // unreachable blocks with undef and make all user PHIs trivially replaceable.
  SmallPtrSet<Instruction *, 8> VisitedUsers;
  for (Value::user_iterator UI = I.user_begin(), UE = I.user_end(); UI != UE;) {
    auto *User = cast<Instruction>(*UI);
    Use &U = UI.getUse();
    ++UI;

    if (VisitedUsers.count(User) || CurLoop->contains(User))
      continue;

    if (!DT->isReachableFromEntry(User->getParent())) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    // The user must be a PHI node.
    PHINode *PN = cast<PHINode>(User);

    // Surprisingly, instructions can be used outside of loops without any
    // exits.  This can only happen in PHI nodes if the incoming block is
    // unreachable.
    BasicBlock *BB = PN->getIncomingBlock(U);
    if (!DT->isReachableFromEntry(BB)) {
      U = UndefValue::get(I.getType());
      Changed = true;
      continue;
    }

    VisitedUsers.insert(PN);
    if (isTriviallyReplaceablePHI(*PN, I))
      continue;

    if (!canSplitPredecessors(PN, SafetyInfo))
      return Changed;

    // Split predecessors of the PHI so that we can make users trivially
    // replaceable.
    splitPredecessorsOfLoopExit(PN, DT, LI, CurLoop, SafetyInfo, MSSAU);

    // Should rebuild the iterators, as they may be invalidated by
    // splitPredecessorsOfLoopExit().
    UI = I.user_begin();
    UE = I.user_end();
  }

  if (VisitedUsers.empty())
    return Changed;

#ifndef NDEBUG
  SmallVector<BasicBlock *, 32> ExitBlocks;
  CurLoop->getUniqueExitBlocks(ExitBlocks);
  SmallPtrSet<BasicBlock *, 32> ExitBlockSet(ExitBlocks.begin(),
                                             ExitBlocks.end());
#endif

  // Clones of this instruction. Don't create more than one per exit block!
  SmallDenseMap<BasicBlock *, Instruction *, 32> SunkCopies;

  // If this instruction is only used outside of the loop, then all users are
  // PHI nodes in exit blocks due to LCSSA form. Just RAUW them with clones of
  // the instruction.
  SmallSetVector<User*, 8> Users(I.user_begin(), I.user_end());
  for (auto *UI : Users) {
    auto *User = cast<Instruction>(UI);

    if (CurLoop->contains(User))
      continue;

    PHINode *PN = cast<PHINode>(User);
    assert(ExitBlockSet.count(PN->getParent()) &&
           "The LCSSA PHI is not in an exit block!");
    // The PHI must be trivially replaceable.
    Instruction *New = sinkThroughTriviallyReplaceablePHI(
        PN, &I, LI, SunkCopies, SafetyInfo, CurLoop, MSSAU);
    PN->replaceAllUsesWith(New);
    eraseInstruction(*PN, *SafetyInfo, nullptr, nullptr);
    Changed = true;
  }
  return Changed;
}

/// When an instruction is found to use only loop invariant operands that
/// are safe to hoist, this function is called to do the dirty work.
///
static void hoist(Instruction &I, const DominatorTree *DT, const Loop *CurLoop,
                  BasicBlock *Dest, ICFLoopSafetyInfo *SafetyInfo,
                  MemorySSAUpdater *MSSAU, ScalarEvolution *SE,
                  OptimizationRemarkEmitter *ORE) {
  LLVM_DEBUG(dbgs() << "LICM hoisting to " << Dest->getName() << ": " << I
                    << "\n");
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "Hoisted", &I) << "hoisting "
                                                         << ore::NV("Inst", &I);
  });

  // Metadata can be dependent on conditions we are hoisting above.
  // Conservatively strip all metadata on the instruction unless we were
  // guaranteed to execute I if we entered the loop, in which case the metadata
  // is valid in the loop preheader.
  if (I.hasMetadataOtherThanDebugLoc() &&
      // The check on hasMetadataOtherThanDebugLoc is to prevent us from burning
      // time in isGuaranteedToExecute if we don't actually have anything to
      // drop.  It is a compile time optimization, not required for correctness.
      !SafetyInfo->isGuaranteedToExecute(I, DT, CurLoop))
    I.dropUnknownNonDebugMetadata();

  if (isa<PHINode>(I))
    // Move the new node to the end of the phi list in the destination block.
    moveInstructionBefore(I, *Dest->getFirstNonPHI(), *SafetyInfo, MSSAU, SE);
  else
    // Move the new node to the destination block, before its terminator.
    moveInstructionBefore(I, *Dest->getTerminator(), *SafetyInfo, MSSAU, SE);

  // Apply line 0 debug locations when we are moving instructions to different
  // basic blocks because we want to avoid jumpy line tables.
  if (const DebugLoc &DL = I.getDebugLoc())
    I.setDebugLoc(DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));

  if (isa<LoadInst>(I))
    ++NumMovedLoads;
  else if (isa<CallInst>(I))
    ++NumMovedCalls;
  ++NumHoisted;
}

/// Only sink or hoist an instruction if it is not a trapping instruction,
/// or if the instruction is known not to trap when moved to the preheader,
/// or if it is a trapping instruction and is guaranteed to execute.
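/// For example, a load through a possibly-null pointer cannot be speculated,
/// but it can still be hoisted to the preheader when it is guaranteed to
/// execute on every iteration that enters the loop, since the hoisted load
/// would only trap where the original one already did.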
static bool isSafeToExecuteUnconditionally(Instruction &Inst,
                                           const DominatorTree *DT,
                                           const Loop *CurLoop,
                                           const LoopSafetyInfo *SafetyInfo,
                                           OptimizationRemarkEmitter *ORE,
                                           const Instruction *CtxI) {
  if (isSafeToSpeculativelyExecute(&Inst, CtxI, DT))
    return true;

  bool GuaranteedToExecute =
      SafetyInfo->isGuaranteedToExecute(Inst, DT, CurLoop);

  if (!GuaranteedToExecute) {
    auto *LI = dyn_cast<LoadInst>(&Inst);
    if (LI && CurLoop->isLoopInvariant(LI->getPointerOperand()))
      ORE->emit([&]() {
        return OptimizationRemarkMissed(
                   DEBUG_TYPE, "LoadWithLoopInvariantAddressCondExecuted", LI)
               << "failed to hoist load with loop-invariant address "
                  "because load is conditionally executed";
      });
  }

  return GuaranteedToExecute;
}

namespace {
class LoopPromoter : public LoadAndStorePromoter {
  Value *SomePtr; // Designated pointer to store to.
  const SmallSetVector<Value *, 8> &PointerMustAliases;
  SmallVectorImpl<BasicBlock *> &LoopExitBlocks;
  SmallVectorImpl<Instruction *> &LoopInsertPts;
  SmallVectorImpl<MemoryAccess *> &MSSAInsertPts;
  PredIteratorCache &PredCache;
  AliasSetTracker &AST;
  MemorySSAUpdater *MSSAU;
  LoopInfo &LI;
  DebugLoc DL;
  int Alignment;
  bool UnorderedAtomic;
  AAMDNodes AATags;
  ICFLoopSafetyInfo &SafetyInfo;

  Value *maybeInsertLCSSAPHI(Value *V, BasicBlock *BB) const {
    if (Instruction *I = dyn_cast<Instruction>(V))
      if (Loop *L = LI.getLoopFor(I->getParent()))
        if (!L->contains(BB)) {
          // We need to create an LCSSA PHI node for the incoming value and
          // store that.
          PHINode *PN = PHINode::Create(I->getType(), PredCache.size(BB),
                                        I->getName() + ".lcssa", &BB->front());
          for (BasicBlock *Pred : PredCache.get(BB))
            PN->addIncoming(I, Pred);
          return PN;
        }
    return V;
  }

public:
  LoopPromoter(Value *SP, ArrayRef<const Instruction *> Insts, SSAUpdater &S,
               const SmallSetVector<Value *, 8> &PMA,
               SmallVectorImpl<BasicBlock *> &LEB,
               SmallVectorImpl<Instruction *> &LIP,
               SmallVectorImpl<MemoryAccess *> &MSSAIP, PredIteratorCache &PIC,
               AliasSetTracker &ast, MemorySSAUpdater *MSSAU, LoopInfo &li,
               DebugLoc dl, int alignment, bool UnorderedAtomic,
               const AAMDNodes &AATags, ICFLoopSafetyInfo &SafetyInfo)
      : LoadAndStorePromoter(Insts, S), SomePtr(SP), PointerMustAliases(PMA),
        LoopExitBlocks(LEB), LoopInsertPts(LIP), MSSAInsertPts(MSSAIP),
        PredCache(PIC), AST(ast), MSSAU(MSSAU), LI(li), DL(std::move(dl)),
        Alignment(alignment), UnorderedAtomic(UnorderedAtomic), AATags(AATags),
        SafetyInfo(SafetyInfo) {}

  bool isInstInList(Instruction *I,
                    const SmallVectorImpl<Instruction *> &) const override {
    Value *Ptr;
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      Ptr = LI->getOperand(0);
    else
      Ptr = cast<StoreInst>(I)->getPointerOperand();
    return PointerMustAliases.count(Ptr);
  }

  void doExtraRewritesBeforeFinalDeletion() override {
    // Insert stores in the loop exit blocks.  Each exit block gets a
    // store of the live-out value that feeds it.  Since we've already told
    // the SSA updater about the defs in the loop and the preheader
    // definition, it is all set and we can start using it.
    for (unsigned i = 0, e = LoopExitBlocks.size(); i != e; ++i) {
      BasicBlock *ExitBlock = LoopExitBlocks[i];
      Value *LiveInValue = SSA.GetValueInMiddleOfBlock(ExitBlock);
      LiveInValue = maybeInsertLCSSAPHI(LiveInValue, ExitBlock);
      Value *Ptr = maybeInsertLCSSAPHI(SomePtr, ExitBlock);
      Instruction *InsertPos = LoopInsertPts[i];
      StoreInst *NewSI = new StoreInst(LiveInValue, Ptr, InsertPos);
      if (UnorderedAtomic)
        NewSI->setOrdering(AtomicOrdering::Unordered);
      NewSI->setAlignment(MaybeAlign(Alignment));
      NewSI->setDebugLoc(DL);
      if (AATags)
        NewSI->setAAMetadata(AATags);

      if (MSSAU) {
        MemoryAccess *MSSAInsertPoint = MSSAInsertPts[i];
        MemoryAccess *NewMemAcc;
        if (!MSSAInsertPoint) {
          NewMemAcc = MSSAU->createMemoryAccessInBB(
              NewSI, nullptr, NewSI->getParent(), MemorySSA::Beginning);
        } else {
          NewMemAcc =
              MSSAU->createMemoryAccessAfter(NewSI, nullptr, MSSAInsertPoint);
        }
        MSSAInsertPts[i] = NewMemAcc;
        MSSAU->insertDef(cast<MemoryDef>(NewMemAcc), true);
        // FIXME: true for safety, false may still be correct.
      }
    }
  }

  void replaceLoadWithValue(LoadInst *LI, Value *V) const override {
    // Update alias analysis.
    AST.copyValue(LI, V);
  }
  void instructionDeleted(Instruction *I) const override {
    SafetyInfo.removeInstruction(I);
    AST.deleteValue(I);
    if (MSSAU)
      MSSAU->removeMemoryAccess(I);
  }
};


/// Return true iff we can prove that a caller of this function cannot inspect
/// the contents of the provided object in a well defined program.
bool isKnownNonEscaping(Value *Object, const TargetLibraryInfo *TLI) {
  if (isa<AllocaInst>(Object))
    // Since the alloca goes out of scope, we know the caller can't retain a
    // reference to it and be well defined.  Thus, we don't need to check for
    // capture.
    return true;

  // For all other objects we need to know that the caller can't possibly
  // have gotten a reference to the object.  There are two components of
  // that:
  //   1) Object can't be escaped by this function.  This is what
  //      PointerMayBeCaptured checks.
  //   2) Object can't have been captured at definition site.  For this, we
  //      need to know the return value is noalias.  At the moment, we use a
  //      weaker condition and handle only AllocLikeFunctions (which are
  //      known to be noalias).  TODO
  return isAllocLikeFn(Object, TLI) &&
    !PointerMayBeCaptured(Object, true, true);
}

} // namespace

/// Try to promote memory values to scalars by sinking stores out of the
/// loop and moving loads to before the loop.  We do this by looping over
/// the stores in the loop, looking for stores to Must pointers which are
/// loop invariant.
///
bool llvm::promoteLoopAccessesToScalars(
    const SmallSetVector<Value *, 8> &PointerMustAliases,
    SmallVectorImpl<BasicBlock *> &ExitBlocks,
    SmallVectorImpl<Instruction *> &InsertPts,
    SmallVectorImpl<MemoryAccess *> &MSSAInsertPts, PredIteratorCache &PIC,
    LoopInfo *LI, DominatorTree *DT, const TargetLibraryInfo *TLI,
    Loop *CurLoop, AliasSetTracker *CurAST, MemorySSAUpdater *MSSAU,
    ICFLoopSafetyInfo *SafetyInfo, OptimizationRemarkEmitter *ORE) {
  // Verify inputs.
  assert(LI != nullptr && DT != nullptr && CurLoop != nullptr &&
         CurAST != nullptr && SafetyInfo != nullptr &&
         "Unexpected Input to promoteLoopAccessesToScalars");

  Value *SomePtr = *PointerMustAliases.begin();
  BasicBlock *Preheader = CurLoop->getLoopPreheader();

  // It is not safe to promote a load/store from the loop if the load/store is
  // conditional.  For example, turning:
  //
  //    for () { if (c) *P += 1; }
  //
  // into:
  //
  //    tmp = *P;  for () { if (c) tmp +=1; } *P = tmp;
  //
  // is not safe, because *P may only be valid to access if 'c' is true.
  //
  // The safety property divides into two parts:
  // p1) The memory may not be dereferenceable on entry to the loop.  In this
  //    case, we can't insert the required load in the preheader.
  // p2) The memory model does not allow us to insert a store along any dynamic
  //    path which did not originally have one.
  //
  // If at least one store is guaranteed to execute, both properties are
  // satisfied, and promotion is legal.
  //
  // This, however, is not a necessary condition. Even if no store/load is
  // guaranteed to execute, we can still establish these properties.
  // We can establish (p1) by proving that hoisting the load into the preheader
  // is safe (i.e. proving dereferenceability on all paths through the loop). We
  // can use any access within the alias set to prove dereferenceability,
  // since they're all must alias.
  //
  // There are two ways to establish (p2):
  // a) Prove the location is thread-local. In this case the memory model
  // requirement does not apply, and stores are safe to insert.
  // b) Prove a store dominates every exit block. In this case, if an exit
  // block is reached, the original dynamic path would have taken us through
  // the store, so inserting a store into the exit block is safe. Note that this
  // is different from the store being guaranteed to execute. For instance,
  // if an exception is thrown on the first iteration of the loop, the original
  // store is never executed, but the exit blocks are not executed either.

  bool DereferenceableInPH = false;
  bool SafeToInsertStore = false;

  SmallVector<Instruction *, 64> LoopUses;

  // We start with an alignment of one and try to find instructions that allow
  // us to prove better alignment.
  unsigned Alignment = 1;
  // Keep track of which types of access we see
  bool SawUnorderedAtomic = false;
  bool SawNotAtomic = false;
  AAMDNodes AATags;

  const DataLayout &MDL = Preheader->getModule()->getDataLayout();

  bool IsKnownThreadLocalObject = false;
  if (SafetyInfo->anyBlockMayThrow()) {
    // If a loop can throw, we have to insert a store along each unwind edge.
    // That said, we can't actually make the unwind edge explicit. Therefore,
    // we have to prove that the store is dead along the unwind edge.  We do
    // this by proving that the caller can't have a reference to the object
    // after return and thus can't possibly load from the object.
    Value *Object = GetUnderlyingObject(SomePtr, MDL);
    if (!isKnownNonEscaping(Object, TLI))
      return false;
    // Subtlety: Alloca's aren't visible to callers, but *are* potentially
    // visible to other threads if captured and used during their lifetimes.
    IsKnownThreadLocalObject = !isa<AllocaInst>(Object);
  }

  // Check that all of the pointers in the alias set have the same type.  We
  // cannot (yet) promote a memory location that is loaded and stored in
  // different sizes.  While we are at it, collect alignment and AA info.
  for (Value *ASIV : PointerMustAliases) {
    // Check that all of the pointers in the alias set have the same type.  We
    // cannot (yet) promote a memory location that is loaded and stored in
    // different sizes.
    if (SomePtr->getType() != ASIV->getType())
      return false;

    for (User *U : ASIV->users()) {
      // Ignore instructions that are outside the loop.
      Instruction *UI = dyn_cast<Instruction>(U);
      if (!UI || !CurLoop->contains(UI))
        continue;

      // If there is a non-load/store instruction in the loop, we can't promote
      // it.
      if (LoadInst *Load = dyn_cast<LoadInst>(UI)) {
        if (!Load->isUnordered())
          return false;

        SawUnorderedAtomic |= Load->isAtomic();
        SawNotAtomic |= !Load->isAtomic();

        unsigned InstAlignment = Load->getAlignment();
        if (!InstAlignment)
          InstAlignment =
              MDL.getABITypeAlignment(Load->getType());

        // Note that proving a load safe to speculate requires proving
        // sufficient alignment at the target location.  Proving it guaranteed
        // to execute does as well.  Thus we can increase our guaranteed
        // alignment as well.
        if (!DereferenceableInPH || (InstAlignment > Alignment))
          if (isSafeToExecuteUnconditionally(*Load, DT, CurLoop, SafetyInfo,
                                             ORE, Preheader->getTerminator())) {
            DereferenceableInPH = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
      } else if (const StoreInst *Store = dyn_cast<StoreInst>(UI)) {
        // Stores *of* the pointer are not interesting, only stores *to* the
        // pointer.
        if (UI->getOperand(1) != ASIV)
          continue;
        if (!Store->isUnordered())
          return false;

        SawUnorderedAtomic |= Store->isAtomic();
        SawNotAtomic |= !Store->isAtomic();

        // If the store is guaranteed to execute, both properties are satisfied.
        // We may want to check if a store is guaranteed to execute even if we
        // already know that promotion is safe, since it may have higher
        // alignment than any other guaranteed stores, in which case we can
        // raise the alignment on the promoted store.
        unsigned InstAlignment = Store->getAlignment();
        if (!InstAlignment)
          InstAlignment =
              MDL.getABITypeAlignment(Store->getValueOperand()->getType());

        if (!DereferenceableInPH || !SafeToInsertStore ||
            (InstAlignment > Alignment)) {
          if (SafetyInfo->isGuaranteedToExecute(*UI, DT, CurLoop)) {
            DereferenceableInPH = true;
            SafeToInsertStore = true;
            Alignment = std::max(Alignment, InstAlignment);
          }
        }

        // If a store dominates all exit blocks, it is safe to sink.
        // As explained above, if an exit block was executed, a dominating
        // store must have been executed at least once, so we are not
        // introducing stores on paths that did not have them.
        // Note that this only looks at explicit exit blocks. If we ever
        // start sinking stores into unwind edges (see above), this will break.
        if (!SafeToInsertStore)
          SafeToInsertStore = llvm::all_of(ExitBlocks, [&](BasicBlock *Exit) {
            return DT->dominates(Store->getParent(), Exit);
          });

        // If the store is not guaranteed to execute, we may still get
        // deref info through it.
        if (!DereferenceableInPH) {
          DereferenceableInPH = isDereferenceableAndAlignedPointer(
              Store->getPointerOperand(), Store->getValueOperand()->getType(),
              MaybeAlign(Store->getAlignment()), MDL,
              Preheader->getTerminator(), DT);
        }
      } else
        return false; // Not a load or store.

      // Merge the AA tags.
      if (LoopUses.empty()) {
        // On the first load/store, just take its AA tags.
        UI->getAAMetadata(AATags);
      } else if (AATags) {
        UI->getAAMetadata(AATags, /* Merge = */ true);
      }

      LoopUses.push_back(UI);
    }
  }

  // If we found both an unordered atomic instruction and a non-atomic memory
  // access, bail.  We can't blindly promote non-atomic to atomic since we
  // might not be able to lower the result.  We can't downgrade since that
  // would violate memory model.  Also, align 0 is an error for atomics.
  if (SawUnorderedAtomic && SawNotAtomic)
    return false;

  // If we're inserting an atomic load in the preheader, we must be able to
  // lower it.  We're only guaranteed to be able to lower naturally aligned
  // atomics.
  auto *SomePtrElemType = SomePtr->getType()->getPointerElementType();
  if (SawUnorderedAtomic &&
      Alignment < MDL.getTypeStoreSize(SomePtrElemType))
    return false;

  // If we couldn't prove we can hoist the load, bail.
  if (!DereferenceableInPH)
    return false;

  // We know we can hoist the load, but don't have a guaranteed store.
  // Check whether the location is thread-local. If it is, then we can insert
  // stores along paths which originally didn't have them without violating the
  // memory model.
  if (!SafeToInsertStore) {
    if (IsKnownThreadLocalObject)
      SafeToInsertStore = true;
    else {
      Value *Object = GetUnderlyingObject(SomePtr, MDL);
      SafeToInsertStore =
          (isAllocLikeFn(Object, TLI) || isa<AllocaInst>(Object)) &&
          !PointerMayBeCaptured(Object, true, true);
    }
  }

  // If we've still failed to prove we can sink the store, give up.
  if (!SafeToInsertStore)
    return false;

  // Otherwise, this is safe to promote, let's do it!
  LLVM_DEBUG(dbgs() << "LICM: Promoting value stored to in loop: " << *SomePtr
                    << '\n');
  ORE->emit([&]() {
    return OptimizationRemark(DEBUG_TYPE, "PromoteLoopAccessesToScalar",
                              LoopUses[0])
           << "Moving accesses to memory location out of the loop";
  });
  ++NumPromoted;

  // Grab a debug location for the inserted loads/stores; given that the
  // inserted loads/stores have little relation to the original loads/stores,
  // this code just arbitrarily picks a location from one, since any debug
  // location is better than none.
  DebugLoc DL = LoopUses[0]->getDebugLoc();

  // We use the SSAUpdater interface to insert phi nodes as required.
  SmallVector<PHINode *, 16> NewPHIs;
  SSAUpdater SSA(&NewPHIs);
  LoopPromoter Promoter(SomePtr, LoopUses, SSA, PointerMustAliases, ExitBlocks,
                        InsertPts, MSSAInsertPts, PIC, *CurAST, MSSAU, *LI, DL,
                        Alignment, SawUnorderedAtomic, AATags, *SafetyInfo);

  // Set up the preheader to have a definition of the value.  It is the live-out
  // value from the preheader that uses in the loop will use.
  LoadInst *PreheaderLoad = new LoadInst(
      SomePtr->getType()->getPointerElementType(), SomePtr,
      SomePtr->getName() + ".promoted", Preheader->getTerminator());
  if (SawUnorderedAtomic)
    PreheaderLoad->setOrdering(AtomicOrdering::Unordered);
  PreheaderLoad->setAlignment(MaybeAlign(Alignment));
  PreheaderLoad->setDebugLoc(DL);
  if (AATags)
    PreheaderLoad->setAAMetadata(AATags);
  SSA.AddAvailableValue(Preheader, PreheaderLoad);

  if (MSSAU) {
    MemoryAccess *PreheaderLoadMemoryAccess = MSSAU->createMemoryAccessInBB(
        PreheaderLoad, nullptr, PreheaderLoad->getParent(), MemorySSA::End);
    MemoryUse *NewMemUse = cast<MemoryUse>(PreheaderLoadMemoryAccess);
    MSSAU->insertUse(NewMemUse, /*RenameUses=*/true);
  }

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // Rewrite all the loads in the loop and remember all the definitions from
  // stores in the loop.
  Promoter.run(LoopUses);

  if (MSSAU && VerifyMemorySSA)
    MSSAU->getMemorySSA()->verifyMemorySSA();
  // If the SSAUpdater didn't use the load in the preheader, just zap it now.
  if (PreheaderLoad->use_empty())
    eraseInstruction(*PreheaderLoad, *SafetyInfo, CurAST, MSSAU);

  return true;
}

/// Returns an owning pointer to an alias set which incorporates aliasing info
/// from L and all subloops of L.
/// FIXME: In the new pass manager, there is no helper function to handle loop
/// analysis such as cloneBasicBlockAnalysis, so the AST needs to be recomputed
/// from scratch for every loop. Hook up with the helper functions when
/// available in the new pass manager to avoid redundant computation.
std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoop(Loop *L, LoopInfo *LI,
                                                 AliasAnalysis *AA) {
  std::unique_ptr<AliasSetTracker> CurAST;
  SmallVector<Loop *, 4> RecomputeLoops;
  for (Loop *InnerL : L->getSubLoops()) {
    auto MapI = LoopToAliasSetMap.find(InnerL);
    // If the AST for this inner loop is missing it may have been merged into
    // some other loop's AST and then that loop unrolled, and so we need to
    // recompute it.
    if (MapI == LoopToAliasSetMap.end()) {
      RecomputeLoops.push_back(InnerL);
      continue;
    }
    std::unique_ptr<AliasSetTracker> InnerAST = std::move(MapI->second);

    if (CurAST) {
      // What if InnerLoop was modified by other passes?
      // Once we've incorporated the inner loop's AST into ours, we don't need
      // the subloop's anymore.
      CurAST->add(*InnerAST);
    } else {
      CurAST = std::move(InnerAST);
    }
    LoopToAliasSetMap.erase(MapI);
  }
  if (!CurAST)
    CurAST = std::make_unique<AliasSetTracker>(*AA);

  // Add everything from the sub loops that are no longer directly available.
  for (Loop *InnerL : RecomputeLoops)
    for (BasicBlock *BB : InnerL->blocks())
      CurAST->add(*BB);

  // And merge in this loop (without anything from inner loops).
  for (BasicBlock *BB : L->blocks())
    if (LI->getLoopFor(BB) == L)
      CurAST->add(*BB);

  return CurAST;
}

std::unique_ptr<AliasSetTracker>
LoopInvariantCodeMotion::collectAliasInfoForLoopWithMSSA(
    Loop *L, AliasAnalysis *AA, MemorySSAUpdater *MSSAU) {
  auto *MSSA = MSSAU->getMemorySSA();
  auto CurAST = std::make_unique<AliasSetTracker>(*AA, MSSA, L);
  CurAST->addAllInstructionsInLoopUsingMSSA();
  return CurAST;
}

/// Simple analysis hook. Clone alias set info.
///
void LegacyLICMPass::cloneBasicBlockAnalysis(BasicBlock *From, BasicBlock *To,
                                             Loop *L) {
  auto ASTIt = LICM.getLoopToAliasSetMap().find(L);
  if (ASTIt == LICM.getLoopToAliasSetMap().end())
    return;

  ASTIt->second->copyValue(From, To);
}

/// Simple Analysis hook. Delete value V from alias set.
///
void LegacyLICMPass::deleteAnalysisValue(Value *V, Loop *L) {
  auto ASTIt = LICM.getLoopToAliasSetMap().find(L);
  if (ASTIt == LICM.getLoopToAliasSetMap().end())
    return;

  ASTIt->second->deleteValue(V);
}

/// Simple Analysis hook. Delete loop L from alias set map.
///
void LegacyLICMPass::deleteAnalysisLoop(Loop *L) {
  if (!LICM.getLoopToAliasSetMap().count(L))
    return;

  LICM.getLoopToAliasSetMap().erase(L);
}

static bool pointerInvalidatedByLoop(MemoryLocation MemLoc,
                                     AliasSetTracker *CurAST, Loop *CurLoop,
                                     AliasAnalysis *AA) {
  // First check to see if any of the basic blocks in CurLoop invalidate *V.
  bool isInvalidatedAccordingToAST = CurAST->getAliasSetFor(MemLoc).isMod();

  if (!isInvalidatedAccordingToAST || !LICMN2Theshold)
    return isInvalidatedAccordingToAST;

  // Check with a diagnostic analysis if we can refine the information above.
  // This is to identify the limitations of using the AST.
  // The alias set mechanism used by LICM has a major weakness in that it
  // combines all things which may alias into a single set *before* asking
  // modref questions. As a result, a single readonly call within a loop will
  // collapse all loads and stores into a single alias set and report
  // invalidation if the loop contains any store. For example, readonly calls
  // with deopt states have this form and create a general alias set with all
  // loads and stores.  In order to get any LICM in loops containing possible
  // deopt states we need a more precise invalidation of checking the mod ref
  // info of each instruction within the loop and LI. This has a complexity of
  // O(N^2), so currently, it is used only as a diagnostic tool since the
  // default value of LICMN2Threshold is zero.
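  // As an illustrative sketch of the weakness described above, a loop body
  // containing
  //   call void @f() readonly
  //   store i32 0, i32* %q
  //   %v = load i32, i32* %p
  // ends up with %p, %q and the call merged into one alias set, so the load
  // is reported as invalidated even when AA could prove %p and %q don't alias.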

  // Don't look at nested loops.
  if (CurLoop->begin() != CurLoop->end())
    return true;

  int N = 0;
  for (BasicBlock *BB : CurLoop->getBlocks())
    for (Instruction &I : *BB) {
      if (N >= LICMN2Theshold) {
        LLVM_DEBUG(dbgs() << "Aliasing N2 threshold exhausted for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
      N++;
      auto Res = AA->getModRefInfo(&I, MemLoc);
      if (isModSet(Res)) {
        LLVM_DEBUG(dbgs() << "Aliasing failed on " << I << " for "
                          << *(MemLoc.Ptr) << "\n");
        return true;
      }
    }
  LLVM_DEBUG(dbgs() << "Aliasing okay for " << *(MemLoc.Ptr) << "\n");
  return false;
}

static bool pointerInvalidatedByLoopWithMSSA(MemorySSA *MSSA, MemoryUse *MU,
                                             Loop *CurLoop,
                                             SinkAndHoistLICMFlags &Flags) {
  // For hoisting, use the walker to determine safety
  if (!Flags.IsSink) {
    MemoryAccess *Source;
    // See declaration of SetLicmMssaOptCap for usage details.
    if (Flags.LicmMssaOptCounter >= Flags.LicmMssaOptCap)
      Source = MU->getDefiningAccess();
    else {
      Source = MSSA->getSkipSelfWalker()->getClobberingMemoryAccess(MU);
      Flags.LicmMssaOptCounter++;
    }
    return !MSSA->isLiveOnEntryDef(Source) &&
           CurLoop->contains(Source->getBlock());
  }

  // For sinking, we'd need to check all Defs below this use. The getClobbering
  // call will look on the backedge of the loop, but will check aliasing with
  // the instructions on the previous iteration.
  // For example:
  // for (i ... )
  //   load a[i]  ( Use(LoE) )
  //   store a[i] ( 1 = Def(2), with 2 = Phi for the loop )
  //   i++;
  // The load sees no clobbering inside the loop, as the backedge alias check
  // does phi translation, and will check aliasing against store a[i-1].
  // However sinking the load outside the loop, below the store is incorrect.

  // For now, only sink if there are no Defs in the loop, and the existing ones
  // precede the use and are in the same block.
  // FIXME: Increase precision: Safe to sink if Use post dominates the Def;
  // needs PostDominatorTreeAnalysis.
  // FIXME: More precise: no Defs that alias this Use.
  if (Flags.NoOfMemAccTooLarge)
    return true;
  for (auto *BB : CurLoop->getBlocks())
    if (auto *Accesses = MSSA->getBlockDefs(BB))
      for (const auto &MA : *Accesses)
        if (const auto *MD = dyn_cast<MemoryDef>(&MA))
          if (MU->getBlock() != MD->getBlock() ||
              !MSSA->locallyDominates(MD, MU))
            return true;
  return false;
}

/// Little predicate that returns true if the specified basic block is in
/// a subloop of the current one, not the current one itself.
///
static bool inSubLoop(BasicBlock *BB, Loop *CurLoop, LoopInfo *LI) {
  assert(CurLoop->contains(BB) && "Only valid if BB is IN the loop");
  return LI->getLoopFor(BB) != CurLoop;
}
