//===- MemorySSA.cpp - Memory SSA Builder ---------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the MemorySSA class.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/MemorySSA.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/IteratedDominanceFrontier.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/IR/AssemblyAnnotationWriter.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/FormattedStream.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <cassert>
#include <iterator>
#include <memory>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memoryssa"

INITIALIZE_PASS_BEGIN(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                      true)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_END(MemorySSAWrapperPass, "memoryssa", "Memory SSA", false,
                    true)

INITIALIZE_PASS_BEGIN(MemorySSAPrinterLegacyPass, "print-memoryssa",
                      "Memory SSA Printer", false, false)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(MemorySSAPrinterLegacyPass, "print-memoryssa",
                    "Memory SSA Printer", false, false)

static cl::opt<unsigned> MaxCheckLimit(
    "memssa-check-limit", cl::Hidden, cl::init(100),
    cl::desc("The maximum number of stores/phis MemorySSA "
             "will consider trying to walk past (default = 100)"));

static cl::opt<bool>
    VerifyMemorySSA("verify-memoryssa", cl::init(false), cl::Hidden,
                    cl::desc("Verify MemorySSA in legacy printer pass."));

namespace llvm {

/// An assembly annotator class to print Memory SSA information in
/// comments.
class MemorySSAAnnotatedWriter : public AssemblyAnnotationWriter {
  friend class MemorySSA;

  const MemorySSA *MSSA;

public:
  MemorySSAAnnotatedWriter(const MemorySSA *M) : MSSA(M) {}

  void emitBasicBlockStartAnnot(const BasicBlock *BB,
                                formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(BB))
      OS << "; " << *MA << "\n";
  }

  void emitInstructionAnnot(const Instruction *I,
                            formatted_raw_ostream &OS) override {
    if (MemoryAccess *MA = MSSA->getMemoryAccess(I))
      OS << "; " << *MA << "\n";
  }
};
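
// Example use of the annotator (an illustrative sketch; the legacy printer
// pass elsewhere in this file drives it in essentially this way):
//   MemorySSAAnnotatedWriter Writer(&MSSA);
//   F.print(OS, &Writer);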

} // end namespace llvm

namespace {

/// Our current alias analysis API differentiates heavily between calls and
/// non-calls, and functions called on one usually assert on the other.
/// This class encapsulates the distinction to simplify other code that wants
/// "Memory affecting instructions and related data" to use as a key.
/// For example, this class is used as a densemap key in the use optimizer.
class MemoryLocOrCall {
public:
  bool IsCall = false;

  MemoryLocOrCall(MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}
  MemoryLocOrCall(const MemoryUseOrDef *MUD)
      : MemoryLocOrCall(MUD->getMemoryInst()) {}

  MemoryLocOrCall(Instruction *Inst) {
    if (ImmutableCallSite(Inst)) {
      IsCall = true;
      CS = ImmutableCallSite(Inst);
    } else {
      IsCall = false;
      // There is no such thing as a MemoryLocation for a fence instruction,
      // and it is unique in that regard.
      if (!isa<FenceInst>(Inst))
        Loc = MemoryLocation::get(Inst);
    }
  }

  explicit MemoryLocOrCall(const MemoryLocation &Loc) : Loc(Loc) {}

  ImmutableCallSite getCS() const {
    assert(IsCall);
    return CS;
  }

  MemoryLocation getLoc() const {
    assert(!IsCall);
    return Loc;
  }

  bool operator==(const MemoryLocOrCall &Other) const {
    if (IsCall != Other.IsCall)
      return false;

    if (!IsCall)
      return Loc == Other.Loc;

    if (CS.getCalledValue() != Other.CS.getCalledValue())
      return false;

    return CS.arg_size() == Other.CS.arg_size() &&
           std::equal(CS.arg_begin(), CS.arg_end(), Other.CS.arg_begin());
  }

private:
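  // Only one union member is active at a time; IsCall (above) is the
  // discriminant. Both members are simple value types, so no special
  // construction or destruction management is needed for the union.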
  union {
    ImmutableCallSite CS;
    MemoryLocation Loc;
  };
};

} // end anonymous namespace

namespace llvm {

template <> struct DenseMapInfo<MemoryLocOrCall> {
  static inline MemoryLocOrCall getEmptyKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getEmptyKey());
  }

  static inline MemoryLocOrCall getTombstoneKey() {
    return MemoryLocOrCall(DenseMapInfo<MemoryLocation>::getTombstoneKey());
  }

  static unsigned getHashValue(const MemoryLocOrCall &MLOC) {
    if (!MLOC.IsCall)
      return hash_combine(
          MLOC.IsCall,
          DenseMapInfo<MemoryLocation>::getHashValue(MLOC.getLoc()));

    hash_code hash =
        hash_combine(MLOC.IsCall, DenseMapInfo<const Value *>::getHashValue(
                                      MLOC.getCS().getCalledValue()));

    for (const Value *Arg : MLOC.getCS().args())
      hash = hash_combine(hash, DenseMapInfo<const Value *>::getHashValue(Arg));
    return hash;
  }

  static bool isEqual(const MemoryLocOrCall &LHS, const MemoryLocOrCall &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

/// This does one-way checks to see if Use could theoretically be hoisted above
/// MayClobber. This will not check the other way around.
///
/// This assumes that, for the purposes of MemorySSA, Use comes directly after
/// MayClobber, with no potentially clobbering operations in between them.
/// (Where potentially clobbering ops are memory barriers, aliased stores, etc.)
static bool areLoadsReorderable(const LoadInst *Use,
                                const LoadInst *MayClobber) {
  bool VolatileUse = Use->isVolatile();
  bool VolatileClobber = MayClobber->isVolatile();
  // Volatile operations may never be reordered with other volatile operations.
  if (VolatileUse && VolatileClobber)
    return false;
  // Otherwise, volatile doesn't matter here. From the language reference:
  // 'optimizers may change the order of volatile operations relative to
  // non-volatile operations.'

  // If a load is seq_cst, it cannot be moved above other loads. If its ordering
  // is weaker, it can be moved above other loads. We just need to be sure that
  // MayClobber isn't an acquire load, because loads can't be moved above
  // acquire loads.
  //
  // Note that this explicitly *does* allow the free reordering of monotonic (or
  // weaker) loads of the same address.
  bool SeqCstUse = Use->getOrdering() == AtomicOrdering::SequentiallyConsistent;
  bool MayClobberIsAcquire = isAtLeastOrStrongerThan(MayClobber->getOrdering(),
                                                     AtomicOrdering::Acquire);
  return !(SeqCstUse || MayClobberIsAcquire);
}

namespace {

struct ClobberAlias {
  bool IsClobber;
  Optional<AliasResult> AR;
};

} // end anonymous namespace

// Return a pair of {IsClobber (bool), AR (AliasResult)}. It relies on AR being
// ignored if IsClobber = false.
static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryLocation &UseLoc,
                                             const Instruction *UseInst,
                                             AliasAnalysis &AA) {
  Instruction *DefInst = MD->getMemoryInst();
  assert(DefInst && "Defining instruction not actually an instruction");
  ImmutableCallSite UseCS(UseInst);
  Optional<AliasResult> AR;

  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(DefInst)) {
    // These intrinsics will show up as affecting memory, but they are just
    // markers, mostly.
    //
    // FIXME: We probably don't actually want MemorySSA to model these at all
    // (including creating MemoryAccesses for them): we just end up inventing
    // clobbers where they don't really exist at all. Please see D43269 for
    // context.
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
      if (UseCS)
        return {false, NoAlias};
      AR = AA.alias(MemoryLocation(II->getArgOperand(1)), UseLoc);
      return {AR != NoAlias, AR};
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::assume:
      return {false, NoAlias};
    default:
      break;
    }
  }

  if (UseCS) {
    ModRefInfo I = AA.getModRefInfo(DefInst, UseCS);
    AR = isMustSet(I) ? MustAlias : MayAlias;
    return {isModOrRefSet(I), AR};
  }

  if (auto *DefLoad = dyn_cast<LoadInst>(DefInst))
    if (auto *UseLoad = dyn_cast<LoadInst>(UseInst))
      return {!areLoadsReorderable(UseLoad, DefLoad), MayAlias};

  ModRefInfo I = AA.getModRefInfo(DefInst, UseLoc);
  AR = isMustSet(I) ? MustAlias : MayAlias;
  return {isModSet(I), AR};
}

static ClobberAlias instructionClobbersQuery(MemoryDef *MD,
                                             const MemoryUseOrDef *MU,
                                             const MemoryLocOrCall &UseMLOC,
                                             AliasAnalysis &AA) {
  // FIXME: This is a temporary hack to allow a single instructionClobbersQuery
  // to exist while MemoryLocOrCall is pushed through places.
  if (UseMLOC.IsCall)
    return instructionClobbersQuery(MD, MemoryLocation(), MU->getMemoryInst(),
                                    AA);
  return instructionClobbersQuery(MD, UseMLOC.getLoc(), MU->getMemoryInst(),
                                  AA);
}

// Return true when MD clobbers MU, false otherwise.
bool MemorySSAUtil::defClobbersUseOrDef(MemoryDef *MD, const MemoryUseOrDef *MU,
                                        AliasAnalysis &AA) {
  return instructionClobbersQuery(MD, MU, MemoryLocOrCall(MU), AA).IsClobber;
}

namespace {

struct UpwardsMemoryQuery {
  // True if our original query started off as a call
  bool IsCall = false;
  // The pointer location we started the query with. This will be empty if
  // IsCall is true.
  MemoryLocation StartingLoc;
  // This is the instruction we were querying about.
  const Instruction *Inst = nullptr;
  // The MemoryAccess we actually got called with, used to test local domination
  const MemoryAccess *OriginalAccess = nullptr;
  Optional<AliasResult> AR = MayAlias;

  UpwardsMemoryQuery() = default;

  UpwardsMemoryQuery(const Instruction *Inst, const MemoryAccess *Access)
      : IsCall(ImmutableCallSite(Inst)), Inst(Inst), OriginalAccess(Access) {
    if (!IsCall)
      StartingLoc = MemoryLocation::get(Inst);
  }
};

} // end anonymous namespace

static bool lifetimeEndsAt(MemoryDef *MD, const MemoryLocation &Loc,
                           AliasAnalysis &AA) {
  Instruction *Inst = MD->getMemoryInst();
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_end:
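      // Operand 1 of the lifetime intrinsic is the pointer; operand 0 is the
      // size of the object in bytes.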
      return AA.isMustAlias(MemoryLocation(II->getArgOperand(1)), Loc);
    default:
      return false;
    }
  }
  return false;
}

static bool isUseTriviallyOptimizableToLiveOnEntry(AliasAnalysis &AA,
                                                   const Instruction *I) {
  // If the memory can't be changed, then loads of the memory can't be
  // clobbered.
  return isa<LoadInst>(I) && (I->getMetadata(LLVMContext::MD_invariant_load) ||
                              AA.pointsToConstantMemory(cast<LoadInst>(I)->
                                                            getPointerOperand()));
}

/// Verifies that `Start` is clobbered by `ClobberAt`, and that nothing
/// in between `Start` and `ClobberAt` can clobber `Start`.
///
/// This is meant to be as simple and self-contained as possible. Because it
/// uses no cache, etc., it can be relatively expensive.
///
/// \param Start The MemoryAccess that we want to walk from.
/// \param ClobberAt A clobber for Start.
/// \param StartLoc The MemoryLocation for Start.
/// \param MSSA The MemorySSA instance that Start and ClobberAt belong to.
/// \param Query The UpwardsMemoryQuery we used for our search.
/// \param AA The AliasAnalysis we used for our search.
static void LLVM_ATTRIBUTE_UNUSED
checkClobberSanity(MemoryAccess *Start, MemoryAccess *ClobberAt,
                   const MemoryLocation &StartLoc, const MemorySSA &MSSA,
                   const UpwardsMemoryQuery &Query, AliasAnalysis &AA) {
  assert(MSSA.dominates(ClobberAt, Start) && "Clobber doesn't dominate start?");

  if (MSSA.isLiveOnEntryDef(Start)) {
    assert(MSSA.isLiveOnEntryDef(ClobberAt) &&
           "liveOnEntry must clobber itself");
    return;
  }

  bool FoundClobber = false;
  DenseSet<MemoryAccessPair> VisitedPhis;
  SmallVector<MemoryAccessPair, 8> Worklist;
  Worklist.emplace_back(Start, StartLoc);
  // Walk all paths from Start to ClobberAt, while looking for clobbers. If one
  // is found, complain.
  while (!Worklist.empty()) {
    MemoryAccessPair MAP = Worklist.pop_back_val();
    // All we care about is that nothing from Start to ClobberAt clobbers Start.
    // We learn nothing from revisiting nodes.
    if (!VisitedPhis.insert(MAP).second)
      continue;

    for (MemoryAccess *MA : def_chain(MAP.first)) {
      if (MA == ClobberAt) {
        if (auto *MD = dyn_cast<MemoryDef>(MA)) {
          // instructionClobbersQuery isn't essentially free, so don't use `|=`,
          // since it won't let us short-circuit.
          //
          // Also, note that this can't be hoisted out of the `Worklist` loop,
          // since MD may only act as a clobber for 1 of N MemoryLocations.
          FoundClobber = FoundClobber || MSSA.isLiveOnEntryDef(MD);
          if (!FoundClobber) {
            ClobberAlias CA =
                instructionClobbersQuery(MD, MAP.second, Query.Inst, AA);
            if (CA.IsClobber) {
              FoundClobber = true;
              // Not used: CA.AR;
            }
          }
        }
        break;
      }

      // We should never hit liveOnEntry, unless it's the clobber.
      assert(!MSSA.isLiveOnEntryDef(MA) && "Hit liveOnEntry before clobber?");

      if (auto *MD = dyn_cast<MemoryDef>(MA)) {
        (void)MD;
        assert(!instructionClobbersQuery(MD, MAP.second, Query.Inst, AA)
                    .IsClobber &&
               "Found clobber before reaching ClobberAt!");
        continue;
      }

      assert(isa<MemoryPhi>(MA));
      Worklist.append(upward_defs_begin({MA, MAP.second}), upward_defs_end());
    }
  }

  // If ClobberAt is a MemoryPhi, we can assume something above it acted as a
  // clobber. Otherwise, `ClobberAt` should've acted as a clobber at some point.
  assert((isa<MemoryPhi>(ClobberAt) || FoundClobber) &&
         "ClobberAt never acted as a clobber");
}

namespace {

/// Our algorithm for walking (and trying to optimize) clobbers, all wrapped up
/// in one class.
class ClobberWalker {
  /// Save a few bytes by using unsigned instead of size_t.
  using ListIndex = unsigned;

  /// Represents a span of contiguous MemoryDefs, potentially ending in a
  /// MemoryPhi.
  struct DefPath {
    MemoryLocation Loc;
    // Note that, because we always walk in reverse, Last will always dominate
    // First. Also note that First and Last are inclusive.
    MemoryAccess *First;
    MemoryAccess *Last;
    Optional<ListIndex> Previous;

    DefPath(const MemoryLocation &Loc, MemoryAccess *First, MemoryAccess *Last,
            Optional<ListIndex> Previous)
        : Loc(Loc), First(First), Last(Last), Previous(Previous) {}

    DefPath(const MemoryLocation &Loc, MemoryAccess *Init,
            Optional<ListIndex> Previous)
        : DefPath(Loc, Init, Init, Previous) {}
  };

  const MemorySSA &MSSA;
  AliasAnalysis &AA;
  DominatorTree &DT;
  UpwardsMemoryQuery *Query;

  // Phi optimization bookkeeping
  SmallVector<DefPath, 32> Paths;
  DenseSet<ConstMemoryAccessPair> VisitedPhis;

  /// Find the nearest def or phi that `From` can legally be optimized to.
  const MemoryAccess *getWalkTarget(const MemoryPhi *From) const {
    assert(From->getNumOperands() && "Phi with no operands?");

    BasicBlock *BB = From->getBlock();
    MemoryAccess *Result = MSSA.getLiveOnEntryDef();
    DomTreeNode *Node = DT.getNode(BB);
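    // Walk up the dominator tree; the first strictly-dominating block that
    // contains any defs hands back its last def, which is the nearest access
    // From could legally be optimized to. If no dominator has defs, the
    // target is liveOnEntry.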
    while ((Node = Node->getIDom())) {
      auto *Defs = MSSA.getBlockDefs(Node->getBlock());
      if (Defs)
        return &*Defs->rbegin();
    }
    return Result;
  }

  /// Result of calling walkToPhiOrClobber.
  struct UpwardsWalkResult {
    /// The "Result" of the walk. Either a clobber, the last thing we walked, or
    /// both. Include alias info when clobber found.
    MemoryAccess *Result;
    bool IsKnownClobber;
    Optional<AliasResult> AR;
  };

  /// Walk to the next Phi or Clobber in the def chain starting at Desc.Last.
  /// This will update Desc.Last as it walks. It will (optionally) also stop at
  /// StopAt.
  ///
  /// This does not test for whether StopAt is a clobber.
  UpwardsWalkResult
  walkToPhiOrClobber(DefPath &Desc,
                     const MemoryAccess *StopAt = nullptr) const {
    assert(!isa<MemoryUse>(Desc.Last) && "Uses don't exist in my world");

    for (MemoryAccess *Current : def_chain(Desc.Last)) {
      Desc.Last = Current;
      if (Current == StopAt)
        return {Current, false, MayAlias};

      if (auto *MD = dyn_cast<MemoryDef>(Current)) {
        if (MSSA.isLiveOnEntryDef(MD))
          return {MD, true, MustAlias};
        ClobberAlias CA =
            instructionClobbersQuery(MD, Desc.Loc, Query->Inst, AA);
        if (CA.IsClobber)
          return {MD, true, CA.AR};
      }
    }

    assert(isa<MemoryPhi>(Desc.Last) &&
           "Ended at a non-clobber that's not a phi?");
    return {Desc.Last, false, MayAlias};
  }

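  /// Fan a paused search out across all of Phi's incoming values: pair each
  /// incoming access with the MemoryLocation that upward_defs yields for that
  /// edge (which may be translated through the phi), record it as a new path
  /// in Paths, and mark it paused. PriorNode is remembered as the new path's
  /// predecessor link.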
  void addSearches(MemoryPhi *Phi, SmallVectorImpl<ListIndex> &PausedSearches,
                   ListIndex PriorNode) {
    auto UpwardDefs = make_range(upward_defs_begin({Phi, Paths[PriorNode].Loc}),
                                 upward_defs_end());
    for (const MemoryAccessPair &P : UpwardDefs) {
      PausedSearches.push_back(Paths.size());
      Paths.emplace_back(P.second, P.first, PriorNode);
    }
  }

  /// Represents a search that terminated after finding a clobber. This clobber
  /// may or may not be present in the path of defs from LastNode..SearchStart,
  /// since it may have been retrieved from cache.
  struct TerminatedPath {
    MemoryAccess *Clobber;
    ListIndex LastNode;
  };

  /// Get an access that keeps us from optimizing to the given phi.
  ///
  /// PausedSearches is an array of indices into the Paths array. Its incoming
  /// value is the indices of searches that stopped at the last phi optimization
  /// target. It's left in an unspecified state.
  ///
  /// If this returns None, NewPaused is a vector of searches that terminated
  /// at StopWhere. Otherwise, NewPaused is left in an unspecified state.
  Optional<TerminatedPath>
  getBlockingAccess(const MemoryAccess *StopWhere,
                    SmallVectorImpl<ListIndex> &PausedSearches,
                    SmallVectorImpl<ListIndex> &NewPaused,
                    SmallVectorImpl<TerminatedPath> &Terminated) {
    assert(!PausedSearches.empty() && "No searches to continue?");

    // BFS vs DFS really doesn't make a difference here, so just do a DFS with
    // PausedSearches as our stack.
    while (!PausedSearches.empty()) {
      ListIndex PathIndex = PausedSearches.pop_back_val();
      DefPath &Node = Paths[PathIndex];

      // If we've already visited this path with this MemoryLocation, we don't
      // need to do so again.
      //
      // NOTE: That we just drop these paths on the ground makes caching
      // behavior sporadic. e.g. given a diamond:
      //  A
      // B C
      //  D
      //
      // ...If we walk D, B, A, C, we'll only cache the result of phi
      // optimization for A, B, and D; C will be skipped because it dies here.
      // This arguably isn't the worst thing ever, since:
      //   - We generally query things in a top-down order, so if we got below D
      //     without needing cache entries for {C, MemLoc}, then chances are
      //     that those cache entries would end up ultimately unused.
      //   - We still cache things for A, so C only needs to walk up a bit.
      // If this behavior becomes problematic, we can fix without a ton of extra
      // work.
      if (!VisitedPhis.insert({Node.Last, Node.Loc}).second)
        continue;

      UpwardsWalkResult Res = walkToPhiOrClobber(Node, /*StopAt=*/StopWhere);
      if (Res.IsKnownClobber) {
        assert(Res.Result != StopWhere);
        // If this wasn't a cache hit, we hit a clobber when walking. That's a
        // failure.
        TerminatedPath Term{Res.Result, PathIndex};
        if (!MSSA.dominates(Res.Result, StopWhere))
          return Term;

        // Otherwise, it's a valid thing to potentially optimize to.
        Terminated.push_back(Term);
        continue;
      }

      if (Res.Result == StopWhere) {
        // We've hit our target. Save this path off for if we want to continue
        // walking.
        NewPaused.push_back(PathIndex);
        continue;
      }

      assert(!MSSA.isLiveOnEntryDef(Res.Result) && "liveOnEntry is a clobber");
      addSearches(cast<MemoryPhi>(Res.Result), PausedSearches, PathIndex);
    }

    return None;
  }

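  /// Iterates a chain of DefPaths backwards through their Previous links,
  /// from a given index in Paths toward the path that began the search.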
  template <typename T, typename Walker>
  struct generic_def_path_iterator
      : public iterator_facade_base<generic_def_path_iterator<T, Walker>,
                                    std::forward_iterator_tag, T *> {
    generic_def_path_iterator() = default;
    generic_def_path_iterator(Walker *W, ListIndex N) : W(W), N(N) {}

    T &operator*() const { return curNode(); }

    generic_def_path_iterator &operator++() {
      N = curNode().Previous;
      return *this;
    }

    bool operator==(const generic_def_path_iterator &O) const {
      if (N.hasValue() != O.N.hasValue())
        return false;
      return !N.hasValue() || *N == *O.N;
    }

  private:
    T &curNode() const { return W->Paths[*N]; }

    Walker *W = nullptr;
    Optional<ListIndex> N = None;
  };

  using def_path_iterator = generic_def_path_iterator<DefPath, ClobberWalker>;
  using const_def_path_iterator =
      generic_def_path_iterator<const DefPath, const ClobberWalker>;

  iterator_range<def_path_iterator> def_path(ListIndex From) {
    return make_range(def_path_iterator(this, From), def_path_iterator());
  }

  iterator_range<const_def_path_iterator>
  const_def_path(ListIndex From) const {
    return make_range(const_def_path_iterator(this, From),
                      const_def_path_iterator());
  }

  struct OptznResult {
    /// The path that contains our result.
    TerminatedPath PrimaryClobber;
    /// The paths that we can legally cache back from, but that aren't
    /// necessarily the result of the Phi optimization.
    SmallVector<TerminatedPath, 4> OtherClobbers;
  };

  ListIndex defPathIndex(const DefPath &N) const {
    // The assert looks nicer if we don't need to do &N
    const DefPath *NP = &N;
    assert(!Paths.empty() && NP >= &Paths.front() && NP <= &Paths.back() &&
           "Out of bounds DefPath!");
    return NP - &Paths.front();
  }

  /// Try to optimize a phi as best as we can. Returns a SmallVector of Paths
  /// that act as legal clobbers. Note that this won't return *all* clobbers.
  ///
  /// Phi optimization algorithm tl;dr:
  ///   - Find the earliest def/phi, A, we can optimize to
  ///   - Find if all paths from the starting memory access ultimately reach A
  ///     - If not, optimization isn't possible.
  ///     - Otherwise, walk from A to another clobber or phi, A'.
  ///       - If A' is a def, we're done.
  ///       - If A' is a phi, try to optimize it.
  ///
  /// A path is a series of {MemoryAccess, MemoryLocation} pairs. A path
  /// terminates when a MemoryAccess that clobbers said MemoryLocation is found.
  OptznResult tryOptimizePhi(MemoryPhi *Phi, MemoryAccess *Start,
                             const MemoryLocation &Loc) {
    assert(Paths.empty() && VisitedPhis.empty() &&
           "Reset the optimization state.");

    Paths.emplace_back(Loc, Start, Phi, None);
    // Stores how many "valid" optimization nodes we had prior to calling
    // addSearches/getBlockingAccess. Necessary for caching if we had a blocker.
    auto PriorPathsSize = Paths.size();

    SmallVector<ListIndex, 16> PausedSearches;
    SmallVector<ListIndex, 8> NewPaused;
    SmallVector<TerminatedPath, 4> TerminatedPaths;

    addSearches(Phi, PausedSearches, 0);

    // Moves the TerminatedPath with the "most dominated" Clobber to the end of
    // Paths.
    auto MoveDominatedPathToEnd = [&](SmallVectorImpl<TerminatedPath> &Paths) {
      assert(!Paths.empty() && "Need a path to move");
      auto Dom = Paths.begin();
      for (auto I = std::next(Dom), E = Paths.end(); I != E; ++I)
        if (!MSSA.dominates(I->Clobber, Dom->Clobber))
          Dom = I;
      auto Last = Paths.end() - 1;
      if (Last != Dom)
        std::iter_swap(Last, Dom);
    };

    MemoryPhi *Current = Phi;
    while (true) {
      assert(!MSSA.isLiveOnEntryDef(Current) &&
             "liveOnEntry wasn't treated as a clobber?");

      const auto *Target = getWalkTarget(Current);
      // If a TerminatedPath doesn't dominate Target, then it wasn't a legal
      // optimization for the prior phi.
      assert(all_of(TerminatedPaths, [&](const TerminatedPath &P) {
        return MSSA.dominates(P.Clobber, Target);
      }));

      // FIXME: This is broken, because the Blocker may be reported to be
      // liveOnEntry, and we'll happily wait for that to disappear (read: never)
      // For the moment, this is fine, since we do nothing with blocker info.
      if (Optional<TerminatedPath> Blocker = getBlockingAccess(
              Target, PausedSearches, NewPaused, TerminatedPaths)) {

        // Find the node we started at. We can't search based on N->Last, since
        // we may have gone around a loop with a different MemoryLocation.
        auto Iter = find_if(def_path(Blocker->LastNode), [&](const DefPath &N) {
          return defPathIndex(N) < PriorPathsSize;
        });
        assert(Iter != def_path_iterator());

        DefPath &CurNode = *Iter;
        assert(CurNode.Last == Current);

        // Two things:
        // A. We can't reliably cache all of NewPaused back. Consider a case
        //    where we have two paths in NewPaused; one of which can't optimize
        //    above this phi, whereas the other can. If we cache the second path
        //    back, we'll end up with suboptimal cache entries. We can handle
        //    cases like this a bit better when we either try to find all
        //    clobbers that block phi optimization, or when our cache starts
        //    supporting unfinished searches.
        // B. We can't reliably cache TerminatedPaths back here without doing
        //    extra checks; consider a case like:
        //       T
        //      / \
        //     D   C
        //      \ /
        //       S
        //    Where T is our target, C is a node with a clobber on it, D is a
        //    diamond (with a clobber *only* on the left or right node, N), and
        //    S is our start. Say we walk to D, through the node opposite N
        //    (read: ignoring the clobber), and see a cache entry in the top
        //    node of D. That cache entry gets put into TerminatedPaths. We then
        //    walk up to C (N is later in our worklist), find the clobber, and
        //    quit. If we append TerminatedPaths to OtherClobbers, we'll cache
        //    the bottom part of D to the cached clobber, ignoring the clobber
        //    in N. Again, this problem goes away if we start tracking all
        //    blockers for a given phi optimization.
        TerminatedPath Result{CurNode.Last, defPathIndex(CurNode)};
        return {Result, {}};
      }

      // If there's nothing left to search, then all paths led to valid clobbers
      // that we got from our cache; pick the nearest to the start, and allow
      // the rest to be cached back.
      if (NewPaused.empty()) {
        MoveDominatedPathToEnd(TerminatedPaths);
        TerminatedPath Result = TerminatedPaths.pop_back_val();
        return {Result, std::move(TerminatedPaths)};
      }

      MemoryAccess *DefChainEnd = nullptr;
      SmallVector<TerminatedPath, 4> Clobbers;
      for (ListIndex Paused : NewPaused) {
        UpwardsWalkResult WR = walkToPhiOrClobber(Paths[Paused]);
        if (WR.IsKnownClobber)
          Clobbers.push_back({WR.Result, Paused});
        else
          // Micro-opt: If we hit the end of the chain, save it.
          DefChainEnd = WR.Result;
      }

      if (!TerminatedPaths.empty()) {
        // If we couldn't find the dominating phi/liveOnEntry in the above loop,
        // do it now.
        if (!DefChainEnd)
          for (auto *MA : def_chain(const_cast<MemoryAccess *>(Target)))
            DefChainEnd = MA;

        // If any of the terminated paths don't dominate the phi we'll try to
        // optimize, we need to figure out what they are and quit.
        const BasicBlock *ChainBB = DefChainEnd->getBlock();
        for (const TerminatedPath &TP : TerminatedPaths) {
          // Because we know that DefChainEnd is as "high" as we can go, we
          // don't need local dominance checks; BB dominance is sufficient.
          if (DT.dominates(ChainBB, TP.Clobber->getBlock()))
            Clobbers.push_back(TP);
        }
      }

      // If we have clobbers in the def chain, find the one closest to Current
      // and quit.
      if (!Clobbers.empty()) {
        MoveDominatedPathToEnd(Clobbers);
        TerminatedPath Result = Clobbers.pop_back_val();
        return {Result, std::move(Clobbers)};
      }

      assert(all_of(NewPaused,
                    [&](ListIndex I) { return Paths[I].Last == DefChainEnd; }));

      // Because liveOnEntry is a clobber, this must be a phi.
      auto *DefChainPhi = cast<MemoryPhi>(DefChainEnd);

      PriorPathsSize = Paths.size();
      PausedSearches.clear();
      for (ListIndex I : NewPaused)
        addSearches(DefChainPhi, PausedSearches, I);
      NewPaused.clear();

      Current = DefChainPhi;
    }
  }

  void verifyOptResult(const OptznResult &R) const {
    assert(all_of(R.OtherClobbers, [&](const TerminatedPath &P) {
      return MSSA.dominates(P.Clobber, R.PrimaryClobber.Clobber);
    }));
  }

  void resetPhiOptznState() {
    Paths.clear();
    VisitedPhis.clear();
  }

public:
  ClobberWalker(const MemorySSA &MSSA, AliasAnalysis &AA, DominatorTree &DT)
      : MSSA(MSSA), AA(AA), DT(DT) {}

  /// Finds the nearest clobber for the given query, optimizing phis if
  /// possible.
  MemoryAccess *findClobber(MemoryAccess *Start, UpwardsMemoryQuery &Q) {
    Query = &Q;

    MemoryAccess *Current = Start;
    // This walker pretends uses don't exist. If we're handed one, silently grab
    // its def. (This has the nice side-effect of ensuring we never cache uses)
    if (auto *MU = dyn_cast<MemoryUse>(Start))
      Current = MU->getDefiningAccess();

    DefPath FirstDesc(Q.StartingLoc, Current, Current, None);
    // Fast path for the overly-common case (no crazy phi optimization
    // necessary)
    UpwardsWalkResult WalkResult = walkToPhiOrClobber(FirstDesc);
    MemoryAccess *Result;
    if (WalkResult.IsKnownClobber) {
      Result = WalkResult.Result;
      Q.AR = WalkResult.AR;
    } else {
      OptznResult OptRes = tryOptimizePhi(cast<MemoryPhi>(FirstDesc.Last),
                                          Current, Q.StartingLoc);
      verifyOptResult(OptRes);
      resetPhiOptznState();
      Result = OptRes.PrimaryClobber.Clobber;
    }

#ifdef EXPENSIVE_CHECKS
    checkClobberSanity(Current, Result, Q.StartingLoc, MSSA, Q, AA);
#endif
    return Result;
  }

  void verify(const MemorySSA *MSSA) { assert(MSSA == &this->MSSA); }
};

struct RenamePassData {
  DomTreeNode *DTN;
  DomTreeNode::const_iterator ChildIt;
  MemoryAccess *IncomingVal;

  RenamePassData(DomTreeNode *D, DomTreeNode::const_iterator It,
                 MemoryAccess *M)
      : DTN(D), ChildIt(It), IncomingVal(M) {}

  void swap(RenamePassData &RHS) {
    std::swap(DTN, RHS.DTN);
    std::swap(ChildIt, RHS.ChildIt);
    std::swap(IncomingVal, RHS.IncomingVal);
  }
};

} // end anonymous namespace

namespace llvm {

/// A MemorySSAWalker that does AA walks to disambiguate accesses. It no
/// longer does caching on its own, but the name has been retained for the
/// moment.
class MemorySSA::CachingWalker final : public MemorySSAWalker {
  ClobberWalker Walker;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *, UpwardsMemoryQuery &);

public:
  CachingWalker(MemorySSA *, AliasAnalysis *, DominatorTree *);
  ~CachingWalker() override = default;

  using MemorySSAWalker::getClobberingMemoryAccess;

  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          const MemoryLocation &) override;
  void invalidateInfo(MemoryAccess *) override;

  void verify(const MemorySSA *MSSA) override {
    MemorySSAWalker::verify(MSSA);
    Walker.verify(MSSA);
  }
};

} // end namespace llvm

void MemorySSA::renameSuccessorPhis(BasicBlock *BB, MemoryAccess *IncomingVal,
                                    bool RenameAllUses) {
  // Pass through values to our successors
  for (const BasicBlock *S : successors(BB)) {
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    if (RenameAllUses) {
      int PhiIndex = Phi->getBasicBlockIndex(BB);
      assert(PhiIndex != -1 && "Incomplete phi during partial rename");
      Phi->setIncomingValue(PhiIndex, IncomingVal);
    } else
      Phi->addIncoming(IncomingVal, BB);
  }
}

/// Rename a single basic block into MemorySSA form.
/// Uses the standard SSA renaming algorithm.
/// \returns The new incoming value.
MemoryAccess *MemorySSA::renameBlock(BasicBlock *BB, MemoryAccess *IncomingVal,
                                     bool RenameAllUses) {
  auto It = PerBlockAccesses.find(BB);
  // Skip most processing if the list is empty.
  if (It != PerBlockAccesses.end()) {
    AccessList *Accesses = It->second.get();
    for (MemoryAccess &L : *Accesses) {
      if (MemoryUseOrDef *MUD = dyn_cast<MemoryUseOrDef>(&L)) {
        if (MUD->getDefiningAccess() == nullptr || RenameAllUses)
          MUD->setDefiningAccess(IncomingVal);
        if (isa<MemoryDef>(&L))
          IncomingVal = &L;
      } else {
        IncomingVal = &L;
      }
    }
  }
  return IncomingVal;
}

/// This is the standard SSA renaming algorithm.
///
/// We walk the dominator tree in preorder, renaming accesses, and then filling
/// in phi nodes in our successors.
void MemorySSA::renamePass(DomTreeNode *Root, MemoryAccess *IncomingVal,
                           SmallPtrSetImpl<BasicBlock *> &Visited,
                           bool SkipVisited, bool RenameAllUses) {
  SmallVector<RenamePassData, 32> WorkStack;
  // Skip everything if we already renamed this block and we are skipping.
  // Note: You can't sink this into the if, because we need it to occur
  // regardless of whether we skip blocks or not.
  bool AlreadyVisited = !Visited.insert(Root->getBlock()).second;
  if (SkipVisited && AlreadyVisited)
    return;

  IncomingVal = renameBlock(Root->getBlock(), IncomingVal, RenameAllUses);
  renameSuccessorPhis(Root->getBlock(), IncomingVal, RenameAllUses);
  WorkStack.push_back({Root, Root->begin(), IncomingVal});

  while (!WorkStack.empty()) {
    DomTreeNode *Node = WorkStack.back().DTN;
    DomTreeNode::const_iterator ChildIt = WorkStack.back().ChildIt;
    IncomingVal = WorkStack.back().IncomingVal;

    if (ChildIt == Node->end()) {
      WorkStack.pop_back();
    } else {
      DomTreeNode *Child = *ChildIt;
      ++WorkStack.back().ChildIt;
      BasicBlock *BB = Child->getBlock();
      // Note: You can't sink this into the if, because we need it to occur
      // regardless of whether we skip blocks or not.
      AlreadyVisited = !Visited.insert(BB).second;
      if (SkipVisited && AlreadyVisited) {
        // We already visited this during our renaming, which can happen when
        // being asked to rename multiple blocks. Figure out the incoming val,
        // which is the last def.
        // Incoming value can only change if there is a block def, and in that
        // case, it's the last block def in the list.
        if (auto *BlockDefs = getWritableBlockDefs(BB))
          IncomingVal = &*BlockDefs->rbegin();
      } else
        IncomingVal = renameBlock(BB, IncomingVal, RenameAllUses);
      renameSuccessorPhis(BB, IncomingVal, RenameAllUses);
      WorkStack.push_back({Child, Child->begin(), IncomingVal});
    }
  }
}

/// This handles unreachable block accesses by deleting phi nodes in
/// unreachable blocks, and marking all other unreachable MemoryAccess's as
/// being uses of the live on entry definition.
void MemorySSA::markUnreachableAsLiveOnEntry(BasicBlock *BB) {
  assert(!DT->isReachableFromEntry(BB) &&
         "Reachable block found while handling unreachable blocks");

  // Make sure phi nodes in our reachable successors end up with a
  // LiveOnEntryDef for our incoming edge, even though our block is forward
  // unreachable. We could just disconnect these blocks from the CFG fully,
  // but we do not right now.
  for (const BasicBlock *S : successors(BB)) {
    if (!DT->isReachableFromEntry(S))
      continue;
    auto It = PerBlockAccesses.find(S);
    // Rename the phi nodes in our successor block
    if (It == PerBlockAccesses.end() || !isa<MemoryPhi>(It->second->front()))
      continue;
    AccessList *Accesses = It->second.get();
    auto *Phi = cast<MemoryPhi>(&Accesses->front());
    Phi->addIncoming(LiveOnEntryDef.get(), BB);
  }

  auto It = PerBlockAccesses.find(BB);
  if (It == PerBlockAccesses.end())
    return;

  auto &Accesses = It->second;
  for (auto AI = Accesses->begin(), AE = Accesses->end(); AI != AE;) {
    auto Next = std::next(AI);
    // If we have a phi, just remove it. We are going to replace all
    // users with live on entry.
    if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(AI))
      UseOrDef->setDefiningAccess(LiveOnEntryDef.get());
    else
      Accesses->erase(AI);
    AI = Next;
  }
}

MemorySSA::MemorySSA(Function &Func, AliasAnalysis *AA, DominatorTree *DT)
    : AA(AA), DT(DT), F(Func), LiveOnEntryDef(nullptr), Walker(nullptr),
      NextID(0) {
  buildMemorySSA();
}

MemorySSA::~MemorySSA() {
  // Drop all our references
  for (const auto &Pair : PerBlockAccesses)
    for (MemoryAccess &MA : *Pair.second)
      MA.dropAllReferences();
}

MemorySSA::AccessList *MemorySSA::getOrCreateAccessList(const BasicBlock *BB) {
  auto Res = PerBlockAccesses.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<AccessList>();
  return Res.first->second.get();
}

MemorySSA::DefsList *MemorySSA::getOrCreateDefsList(const BasicBlock *BB) {
  auto Res = PerBlockDefs.insert(std::make_pair(BB, nullptr));

  if (Res.second)
    Res.first->second = llvm::make_unique<DefsList>();
  return Res.first->second.get();
}

namespace llvm {

/// This class is a batch walker of all MemoryUse's in the program, and points
/// their defining access at the thing that actually clobbers them. Because it
/// is a batch walker that touches everything, it does not operate like the
/// other walkers. This walker is basically performing a top-down SSA renaming
/// pass, where the version stack is used as the cache. This enables it to be
/// significantly more time and memory efficient than using the regular walker,
/// which is walking bottom-up.
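///
/// As a conceptual illustration: given stores S1 then S2 to one location
/// followed by a load of it, the version stack at the load holds
/// [liveOnEntry, S1, S2]; the load only scans down from the top of the stack
/// until it hits its clobber (here S2), and the per-location bookkeeping
/// below records where that scan ended so later uses of the same location can
/// resume from there.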
class MemorySSA::OptimizeUses {
public:
  OptimizeUses(MemorySSA *MSSA, MemorySSAWalker *Walker, AliasAnalysis *AA,
               DominatorTree *DT)
      : MSSA(MSSA), Walker(Walker), AA(AA), DT(DT) {
    Walker = MSSA->getWalker();
  }

  void optimizeUses();

private:
  /// This represents where a given MemoryLocation is in the stack.
  struct MemlocStackInfo {
    // This essentially is keeping track of versions of the stack. Whenever
    // the stack changes due to pushes or pops, these versions increase.
    unsigned long StackEpoch;
    unsigned long PopEpoch;
    // This is the lower bound of places on the stack to check. It is equal to
    // the place the last stack walk ended.
    // Note: Correctness depends on this being initialized to 0, which densemap
    // does
    unsigned long LowerBound;
    const BasicBlock *LowerBoundBlock;
    // This is where the last walk for this memory location ended.
    unsigned long LastKill;
    bool LastKillValid;
    Optional<AliasResult> AR;
  };

  void optimizeUsesInBlock(const BasicBlock *, unsigned long &, unsigned long &,
                           SmallVectorImpl<MemoryAccess *> &,
                           DenseMap<MemoryLocOrCall, MemlocStackInfo> &);

  MemorySSA *MSSA;
  MemorySSAWalker *Walker;
  AliasAnalysis *AA;
  DominatorTree *DT;
};

} // end namespace llvm

/// Optimize the uses in a given block. This is basically the SSA renaming
/// algorithm, with one caveat: We are able to use a single stack for all
/// MemoryUses. This is because the set of *possible* reaching MemoryDefs is
/// the same for every MemoryUse. The *actual* clobbering MemoryDef is just
/// going to be some position in that stack of possible ones.
///
/// We track, for each MemoryLocation, the stack positions it needs to check
/// and where its last walk ended, because we only want to check the things
/// that changed since last time. The same MemoryLocation should get clobbered
/// by the same store (getModRefInfo does not use invariantness or things like
/// this, and if they start, we can modify MemoryLocOrCall to include relevant
/// data).
void MemorySSA::OptimizeUses::optimizeUsesInBlock(
    const BasicBlock *BB, unsigned long &StackEpoch, unsigned long &PopEpoch,
    SmallVectorImpl<MemoryAccess *> &VersionStack,
    DenseMap<MemoryLocOrCall, MemlocStackInfo> &LocStackInfo) {

  // If no accesses, nothing to do.
  MemorySSA::AccessList *Accesses = MSSA->getWritableBlockAccesses(BB);
  if (Accesses == nullptr)
    return;

  // Pop everything that doesn't dominate the current block off the stack,
  // increment the PopEpoch to account for this.
  while (true) {
    assert(
        !VersionStack.empty() &&
        "Version stack should have liveOnEntry sentinel dominating everything");
    BasicBlock *BackBlock = VersionStack.back()->getBlock();
    if (DT->dominates(BackBlock, BB))
      break;
    while (VersionStack.back()->getBlock() == BackBlock)
      VersionStack.pop_back();
    ++PopEpoch;
  }

  for (MemoryAccess &MA : *Accesses) {
    auto *MU = dyn_cast<MemoryUse>(&MA);
    if (!MU) {
      VersionStack.push_back(&MA);
      ++StackEpoch;
      continue;
    }

    if (isUseTriviallyOptimizableToLiveOnEntry(*AA, MU->getMemoryInst())) {
      MU->setDefiningAccess(MSSA->getLiveOnEntryDef(), true, None);
      continue;
    }

    MemoryLocOrCall UseMLOC(MU);
    auto &LocInfo = LocStackInfo[UseMLOC];
    // If the pop epoch changed, it means we've removed stuff from top of
    // stack due to changing blocks. We may have to reset the lower bound or
    // last kill info.
    if (LocInfo.PopEpoch != PopEpoch) {
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
      // If the lower bound was in something that no longer dominates us, we
      // have to reset it.
      // We can't simply track stack size, because the stack may have had
      // pushes/pops in the meantime.
      // XXX: This is non-optimal, but is only slower in cases with heavily
      // branching dominator trees. To get the optimal number of queries we
      // would have to make LowerBound and LastKill a per-loc stack, and pop
      // it until the top of that stack dominates us. This does not seem worth
      // it ATM. A much cheaper optimization would be to always explore the
      // deepest branch of the dominator tree first. This will guarantee this
      // resets on the smallest set of blocks.
      if (LocInfo.LowerBoundBlock && LocInfo.LowerBoundBlock != BB &&
          !DT->dominates(LocInfo.LowerBoundBlock, BB)) {
        // Reset the lower bound of things to check.
        // TODO: Some day we should be able to reset to last kill, rather than
        // 0.
        LocInfo.LowerBound = 0;
        LocInfo.LowerBoundBlock = VersionStack[0]->getBlock();
        LocInfo.LastKillValid = false;
      }
    } else if (LocInfo.StackEpoch != StackEpoch) {
      // If all that has changed is the StackEpoch, we only have to check the
      // new things on the stack, because we've checked everything before. In
      // this case, the lower bound of things to check remains the same.
      LocInfo.PopEpoch = PopEpoch;
      LocInfo.StackEpoch = StackEpoch;
    }
    if (!LocInfo.LastKillValid) {
      LocInfo.LastKill = VersionStack.size() - 1;
      LocInfo.LastKillValid = true;
      LocInfo.AR = MayAlias;
    }

    // At this point, we should have corrected last kill and LowerBound to be
    // in bounds.
    assert(LocInfo.LowerBound < VersionStack.size() &&
           "Lower bound out of range");
    assert(LocInfo.LastKill < VersionStack.size() &&
           "Last kill info out of range");
    // In any case, the new upper bound is the top of the stack.
    unsigned long UpperBound = VersionStack.size() - 1;

    if (UpperBound - LocInfo.LowerBound > MaxCheckLimit) {
      LLVM_DEBUG(dbgs() << "MemorySSA skipping optimization of " << *MU << " ("
                        << *(MU->getMemoryInst()) << ")"
                        << " because there are "
                        << UpperBound - LocInfo.LowerBound
                        << " stores to disambiguate\n");
      // Because we did not walk, LastKill is no longer valid, as this may
      // have been a kill.
      LocInfo.LastKillValid = false;
      continue;
    }
    bool FoundClobberResult = false;
    while (UpperBound > LocInfo.LowerBound) {
      if (isa<MemoryPhi>(VersionStack[UpperBound])) {
        // For phis, use the walker, see where we ended up, go there
        Instruction *UseInst = MU->getMemoryInst();
        MemoryAccess *Result = Walker->getClobberingMemoryAccess(UseInst);
        // We are guaranteed to find it or something is wrong
        while (VersionStack[UpperBound] != Result) {
          assert(UpperBound != 0);
          --UpperBound;
        }
        FoundClobberResult = true;
        break;
      }

      MemoryDef *MD = cast<MemoryDef>(VersionStack[UpperBound]);
      // If the lifetime of the pointer ends at this instruction, it's live on
      // entry.
      if (!UseMLOC.IsCall && lifetimeEndsAt(MD, UseMLOC.getLoc(), *AA)) {
        // Reset UpperBound to liveOnEntryDef's place in the stack
        UpperBound = 0;
        FoundClobberResult = true;
        LocInfo.AR = MustAlias;
        break;
      }
      ClobberAlias CA = instructionClobbersQuery(MD, MU, UseMLOC, *AA);
      if (CA.IsClobber) {
        FoundClobberResult = true;
        LocInfo.AR = CA.AR;
        break;
      }
      --UpperBound;
    }

    // Note: Phis always have AliasResult AR set to MayAlias ATM.

    // At the end of this loop, UpperBound is either a clobber or the lower
    // bound. PHI walking may cause it to be < LowerBound, and in fact,
    // < LastKill.
    if (FoundClobberResult || UpperBound < LocInfo.LastKill) {
      // We were last killed now by where we got to
      if (MSSA->isLiveOnEntryDef(VersionStack[UpperBound]))
        LocInfo.AR = None;
      MU->setDefiningAccess(VersionStack[UpperBound], true, LocInfo.AR);
      LocInfo.LastKill = UpperBound;
    } else {
      // Otherwise, we checked all the new ones, and now we know we can get to
      // LastKill.
      MU->setDefiningAccess(VersionStack[LocInfo.LastKill], true, LocInfo.AR);
    }
    LocInfo.LowerBound = VersionStack.size() - 1;
    LocInfo.LowerBoundBlock = BB;
  }
}

/// Optimize uses to point to their actual clobbering definitions.
void MemorySSA::OptimizeUses::optimizeUses() {
  SmallVector<MemoryAccess *, 16> VersionStack;
  DenseMap<MemoryLocOrCall, MemlocStackInfo> LocStackInfo;
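  // liveOnEntry serves as the stack's sentinel: it dominates every block, so
  // the pop loop in optimizeUsesInBlock can never empty the stack.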
  VersionStack.push_back(MSSA->getLiveOnEntryDef());

  unsigned long StackEpoch = 1;
  unsigned long PopEpoch = 1;
  // We perform a non-recursive top-down dominator tree walk.
  for (const auto *DomNode : depth_first(DT->getRootNode()))
    optimizeUsesInBlock(DomNode->getBlock(), StackEpoch, PopEpoch, VersionStack,
                        LocStackInfo);
}

void MemorySSA::placePHINodes(
    const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks) {
  // Determine where our MemoryPhi's should go
  ForwardIDFCalculator IDFs(*DT);
  IDFs.setDefiningBlocks(DefiningBlocks);
  SmallVector<BasicBlock *, 32> IDFBlocks;
  IDFs.calculate(IDFBlocks);

  // Now place MemoryPhi nodes.
  for (auto &BB : IDFBlocks)
    createMemoryPhi(BB);
}

void MemorySSA::buildMemorySSA() {
  // We create an access to represent "live on entry", for things like
  // arguments or users of globals, where the memory they use is defined before
  // the beginning of the function. We do not actually insert it into the IR.
  // We do not define a live on exit for the immediate uses, and thus our
  // semantics do *not* imply that something with no immediate uses can simply
  // be removed.
  BasicBlock &StartingPoint = F.getEntryBlock();
  LiveOnEntryDef.reset(new MemoryDef(F.getContext(), nullptr, nullptr,
                                     &StartingPoint, NextID++));

  // We maintain lists of memory accesses per block, trading memory for time.
  // We could just look up the memory access for every possible instruction in
  // the stream.
  SmallPtrSet<BasicBlock *, 32> DefiningBlocks;
  // Go through each block, figure out where defs occur, and chain together all
  // the accesses.
  for (BasicBlock &B : F) {
    bool InsertIntoDef = false;
    AccessList *Accesses = nullptr;
    DefsList *Defs = nullptr;
    for (Instruction &I : B) {
      MemoryUseOrDef *MUD = createNewAccess(&I);
      if (!MUD)
        continue;

      if (!Accesses)
        Accesses = getOrCreateAccessList(&B);
      Accesses->push_back(MUD);
      if (isa<MemoryDef>(MUD)) {
        InsertIntoDef = true;
        if (!Defs)
          Defs = getOrCreateDefsList(&B);
        Defs->push_back(*MUD);
      }
    }
    if (InsertIntoDef)
      DefiningBlocks.insert(&B);
  }
  placePHINodes(DefiningBlocks);

  // Now do regular SSA renaming on the MemoryDef/MemoryUse. Visited will get
  // filled in with all blocks.
  SmallPtrSet<BasicBlock *, 16> Visited;
  renamePass(DT->getRootNode(), LiveOnEntryDef.get(), Visited);

  CachingWalker *Walker = getWalkerImpl();

  OptimizeUses(this, Walker, AA, DT).optimizeUses();

  // Mark the uses in unreachable blocks as live on entry, so that they go
  // somewhere.
  for (auto &BB : F)
    if (!Visited.count(&BB))
      markUnreachableAsLiveOnEntry(&BB);
}

MemorySSAWalker *MemorySSA::getWalker() { return getWalkerImpl(); }
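
// Typical client use of the walker (an illustrative sketch):
//   MemorySSAWalker *W = MSSA.getWalker();
//   if (MemoryAccess *Clobber = W->getClobberingMemoryAccess(SomeLoadInst))
//     ...; // nearest dominating access that may clobber SomeLoadInst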
1396
getWalkerImpl()1397 MemorySSA::CachingWalker *MemorySSA::getWalkerImpl() {
1398 if (Walker)
1399 return Walker.get();
1400
1401 Walker = llvm::make_unique<CachingWalker>(this, AA, DT);
1402 return Walker.get();
1403 }
1404
1405 // This is a helper function used by the creation routines. It places NewAccess
1406 // into the access and defs lists for a given basic block, at the given
1407 // insertion point.
insertIntoListsForBlock(MemoryAccess * NewAccess,const BasicBlock * BB,InsertionPlace Point)1408 void MemorySSA::insertIntoListsForBlock(MemoryAccess *NewAccess,
1409 const BasicBlock *BB,
1410 InsertionPlace Point) {
1411 auto *Accesses = getOrCreateAccessList(BB);
1412 if (Point == Beginning) {
1413 // If it's a phi node, it goes first, otherwise, it goes after any phi
1414 // nodes.
    if (isa<MemoryPhi>(NewAccess)) {
      Accesses->push_front(NewAccess);
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_front(*NewAccess);
    } else {
      auto AI = find_if_not(
          *Accesses, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
      Accesses->insert(AI, NewAccess);
      if (!isa<MemoryUse>(NewAccess)) {
        auto *Defs = getOrCreateDefsList(BB);
        auto DI = find_if_not(
            *Defs, [](const MemoryAccess &MA) { return isa<MemoryPhi>(MA); });
        Defs->insert(DI, *NewAccess);
      }
    }
  } else {
    Accesses->push_back(NewAccess);
    if (!isa<MemoryUse>(NewAccess)) {
      auto *Defs = getOrCreateDefsList(BB);
      Defs->push_back(*NewAccess);
    }
  }
  BlockNumberingValid.erase(BB);
}
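
// For illustration, the per-block ordering invariant these list-insertion
// routines maintain: the access list always begins with the block's MemoryPhi
// (if any), followed by the MemoryUses and MemoryDefs in instruction order,
// e.g.
//   2 = MemoryPhi({...},{...})
//   MemoryUse(2)
//   3 = MemoryDef(2)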

void MemorySSA::insertIntoListsBefore(MemoryAccess *What, const BasicBlock *BB,
                                      AccessList::iterator InsertPt) {
  auto *Accesses = getWritableBlockAccesses(BB);
  bool WasEnd = InsertPt == Accesses->end();
  Accesses->insert(AccessList::iterator(InsertPt), What);
  if (!isa<MemoryUse>(What)) {
    auto *Defs = getOrCreateDefsList(BB);
    // If we got asked to insert at the end, we have an easy job, just shove it
    // at the end. If we got asked to insert before an existing def, we also get
    // an iterator. If we got asked to insert before a use, we have to hunt for
    // the next def.
    if (WasEnd) {
      Defs->push_back(*What);
    } else if (isa<MemoryDef>(InsertPt)) {
      Defs->insert(InsertPt->getDefsIterator(), *What);
    } else {
      while (InsertPt != Accesses->end() && !isa<MemoryDef>(InsertPt))
        ++InsertPt;
      // Either we found a def, or we are inserting at the end.
      if (InsertPt == Accesses->end())
        Defs->push_back(*What);
      else
        Defs->insert(InsertPt->getDefsIterator(), *What);
    }
  }
  BlockNumberingValid.erase(BB);
}

// Move What before Where in the IR. The end result is that What will belong to
// the right lists and have the right Block set, but will not otherwise be
// correct. It will not have the right defining access, and if it is a def,
// things below it will not properly be updated.
void MemorySSA::moveTo(MemoryUseOrDef *What, BasicBlock *BB,
                       AccessList::iterator Where) {
  // Keep it in the lookup tables, remove from the lists.
  removeFromLists(What, false);
  What->setBlock(BB);
  insertIntoListsBefore(What, BB, Where);
}

void MemorySSA::moveTo(MemoryAccess *What, BasicBlock *BB,
                       InsertionPlace Point) {
  if (isa<MemoryPhi>(What)) {
    assert(Point == Beginning &&
           "Can only move a Phi at the beginning of the block");
    // Update lookup table entry.
    ValueToMemoryAccess.erase(What->getBlock());
    bool Inserted = ValueToMemoryAccess.insert({BB, What}).second;
    (void)Inserted;
    assert(Inserted && "Cannot move a Phi to a block that already has one");
  }

  removeFromLists(What, false);
  What->setBlock(BB);
  insertIntoListsForBlock(What, BB, Point);
}

MemoryPhi *MemorySSA::createMemoryPhi(BasicBlock *BB) {
  assert(!getMemoryAccess(BB) && "MemoryPhi already exists for this BB");
  MemoryPhi *Phi = new MemoryPhi(BB->getContext(), BB, NextID++);
  // Phis are always placed at the front of the block.
  insertIntoListsForBlock(Phi, BB, Beginning);
  ValueToMemoryAccess[BB] = Phi;
  return Phi;
}

MemoryUseOrDef *MemorySSA::createDefinedAccess(Instruction *I,
                                               MemoryAccess *Definition) {
  assert(!isa<PHINode>(I) && "Cannot create a defined access for a PHI");
  MemoryUseOrDef *NewAccess = createNewAccess(I);
  assert(
      NewAccess != nullptr &&
      "Tried to create a memory access for a non-memory touching instruction");
  NewAccess->setDefiningAccess(Definition);
  return NewAccess;
}
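
// A sketch of typical external usage: MemorySSAUpdater wraps this when a new
// memory-touching instruction has been inserted into the IR (hypothetical
// snippet, not part of this file; names assumed from MemorySSAUpdater):
//   MemorySSAUpdater Updater(&MSSA);
//   Updater.createMemoryAccessInBB(NewStore, DefiningAcc, BB, MemorySSA::End);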

// Return true if the instruction has ordering constraints.
// Note specifically that this only considers stores and loads
// because others are still considered ModRef by getModRefInfo.
static inline bool isOrdered(const Instruction *I) {
  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!SI->isUnordered())
      return true;
  } else if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!LI->isUnordered())
      return true;
  }
  return false;
}
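
// For example, a volatile or atomic (acquire or stronger) load is not
// "unordered", so isOrdered() returns true for it and createNewAccess below
// will model it as a MemoryDef even though it only reads memory.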

/// Helper function to create new memory accesses
MemoryUseOrDef *MemorySSA::createNewAccess(Instruction *I) {
  // The assume intrinsic has a control dependency which we model by claiming
  // that it writes arbitrarily. Ignore that fake memory dependency here.
  // FIXME: Replace this special casing with a more accurate modelling of
  // assume's control dependency.
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::assume)
      return nullptr;

  // Find out what effect this instruction has on memory.
  ModRefInfo ModRef = AA->getModRefInfo(I, None);
  // The isOrdered check is used to ensure that volatiles end up as defs
  // (atomics end up as ModRef right now anyway). Until we separate the
  // ordering chain from the memory chain, this enables people to see at least
  // some relative ordering to volatiles. Note that getClobberingMemoryAccess
  // will still give an answer that bypasses other volatile loads. TODO:
  // Separate memory aliasing and ordering into two different chains so that we
  // can precisely represent both "what memory this will read/write/be
  // clobbered by" and "what instructions can I move this past".
  bool Def = isModSet(ModRef) || isOrdered(I);
  bool Use = isRefSet(ModRef);

  // It's possible for an instruction to neither read nor write memory. During
  // construction, we ignore such instructions.
  if (!Def && !Use)
    return nullptr;

  MemoryUseOrDef *MUD;
  if (Def)
    MUD = new MemoryDef(I->getContext(), nullptr, I, I->getParent(), NextID++);
  else
    MUD = new MemoryUse(I->getContext(), nullptr, I, I->getParent());
  ValueToMemoryAccess[I] = MUD;
  return MUD;
}
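
// The resulting mapping, for illustration: a store (mod) becomes a MemoryDef,
// a plain load (ref) becomes a MemoryUse, and a call that both reads and
// writes (ModRef) becomes a MemoryDef, since Def wins whenever isModSet.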

/// Returns true if \p Replacer dominates \p Replacee.
bool MemorySSA::dominatesUse(const MemoryAccess *Replacer,
                             const MemoryAccess *Replacee) const {
  if (isa<MemoryUseOrDef>(Replacee))
    return DT->dominates(Replacer->getBlock(), Replacee->getBlock());
  const auto *MP = cast<MemoryPhi>(Replacee);
  // For a phi node, the use occurs in the predecessor block of the phi node.
  // Since Replacee may appear multiple times in the phi node, we have to check
  // each operand to ensure Replacer dominates each operand where Replacee
  // occurs.
  for (const Use &Arg : MP->operands()) {
    if (Arg.get() != Replacee &&
        !DT->dominates(Replacer->getBlock(), MP->getIncomingBlock(Arg)))
      return false;
  }
  return true;
}
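
// Example of the phi case (hypothetical blocks): if Replacee appears as a
// MemoryPhi operand coming from block %pred, the use is considered to occur
// at the end of %pred, so Replacer must dominate %pred rather than the phi's
// own block.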

/// Properly remove \p MA from all of MemorySSA's lookup tables.
void MemorySSA::removeFromLookups(MemoryAccess *MA) {
  assert(MA->use_empty() &&
         "Trying to remove memory access that still has uses");
  BlockNumbering.erase(MA);
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->setDefiningAccess(nullptr);
  // Invalidate our walker's cache if necessary.
  if (!isa<MemoryUse>(MA))
    Walker->invalidateInfo(MA);

  Value *MemoryInst;
  if (const auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MemoryInst = MUD->getMemoryInst();
  else
    MemoryInst = MA->getBlock();

  auto VMA = ValueToMemoryAccess.find(MemoryInst);
  if (VMA->second == MA)
    ValueToMemoryAccess.erase(VMA);
}

/// Properly remove \p MA from all of MemorySSA's lists.
///
/// Because of the way the intrusive list and use lists work, it is important to
/// do removal in the right order.
/// ShouldDelete defaults to true, and will cause the memory access to also be
/// deleted, not just removed.
void MemorySSA::removeFromLists(MemoryAccess *MA, bool ShouldDelete) {
  BasicBlock *BB = MA->getBlock();
  // The access list owns the reference, so we erase it from the non-owning list
  // first.
  if (!isa<MemoryUse>(MA)) {
    auto DefsIt = PerBlockDefs.find(BB);
    std::unique_ptr<DefsList> &Defs = DefsIt->second;
    Defs->remove(*MA);
    if (Defs->empty())
      PerBlockDefs.erase(DefsIt);
  }

  // The erase call here will delete it. If we don't want it deleted, we call
  // remove instead.
  auto AccessIt = PerBlockAccesses.find(BB);
  std::unique_ptr<AccessList> &Accesses = AccessIt->second;
  if (ShouldDelete)
    Accesses->erase(MA);
  else
    Accesses->remove(MA);

  if (Accesses->empty()) {
    PerBlockAccesses.erase(AccessIt);
    BlockNumberingValid.erase(BB);
  }
}

void MemorySSA::print(raw_ostream &OS) const {
  MemorySSAAnnotatedWriter Writer(this);
  F.print(OS, &Writer);
}

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemorySSA::dump() const { print(dbgs()); }
#endif

void MemorySSA::verifyMemorySSA() const {
  verifyDefUses(F);
  verifyDomination(F);
  verifyOrdering(F);
  verifyDominationNumbers(F);
  Walker->verify(this);
}

/// Verify that all of the blocks we believe to have valid domination numbers
/// actually have valid domination numbers.
void MemorySSA::verifyDominationNumbers(const Function &F) const {
#ifndef NDEBUG
  if (BlockNumberingValid.empty())
    return;

  SmallPtrSet<const BasicBlock *, 16> ValidBlocks = BlockNumberingValid;
  for (const BasicBlock &BB : F) {
    if (!ValidBlocks.count(&BB))
      continue;

    ValidBlocks.erase(&BB);

    const AccessList *Accesses = getBlockAccesses(&BB);
    // It's correct to say an empty block has valid numbering.
    if (!Accesses)
      continue;

    // Block numbering starts at 1.
    unsigned long LastNumber = 0;
    for (const MemoryAccess &MA : *Accesses) {
      auto ThisNumberIter = BlockNumbering.find(&MA);
      assert(ThisNumberIter != BlockNumbering.end() &&
             "MemoryAccess has no domination number in a valid block!");

      unsigned long ThisNumber = ThisNumberIter->second;
      assert(ThisNumber > LastNumber &&
             "Domination numbers should be strictly increasing!");
      LastNumber = ThisNumber;
    }
  }

  assert(ValidBlocks.empty() &&
         "All valid BasicBlocks should exist in F -- dangling pointers?");
#endif
}

/// Verify that the order and existence of MemoryAccesses matches the
/// order and existence of memory affecting instructions.
void MemorySSA::verifyOrdering(Function &F) const {
  // Walk all the blocks, comparing what the lookups think and what the access
  // lists think, as well as the order in the blocks vs the order in the access
  // lists.
  SmallVector<MemoryAccess *, 32> ActualAccesses;
  SmallVector<MemoryAccess *, 32> ActualDefs;
  for (BasicBlock &B : F) {
    const AccessList *AL = getBlockAccesses(&B);
    const auto *DL = getBlockDefs(&B);
    MemoryAccess *Phi = getMemoryAccess(&B);
    if (Phi) {
      ActualAccesses.push_back(Phi);
      ActualDefs.push_back(Phi);
    }

    for (Instruction &I : B) {
      MemoryAccess *MA = getMemoryAccess(&I);
      assert((!MA || (AL && (isa<MemoryUse>(MA) || DL))) &&
             "We have memory affecting instructions "
             "in this block but they are not in the "
             "access list or defs list");
      if (MA) {
        ActualAccesses.push_back(MA);
        if (isa<MemoryDef>(MA))
          ActualDefs.push_back(MA);
      }
    }
    // Either we hit the assert, really have no accesses, or we have both
    // accesses and an access list.
    // Same with defs.
    if (!AL && !DL)
      continue;
    assert(AL->size() == ActualAccesses.size() &&
           "We don't have the same number of accesses in the block as on the "
           "access list");
    assert((DL || ActualDefs.size() == 0) &&
           "Either we should have a defs list, or we should have no defs");
    assert((!DL || DL->size() == ActualDefs.size()) &&
           "We don't have the same number of defs in the block as on the "
           "def list");
    auto ALI = AL->begin();
    auto AAI = ActualAccesses.begin();
    while (ALI != AL->end() && AAI != ActualAccesses.end()) {
      assert(&*ALI == *AAI && "Not the same accesses in the same order");
      ++ALI;
      ++AAI;
    }
    ActualAccesses.clear();
    if (DL) {
      auto DLI = DL->begin();
      auto ADI = ActualDefs.begin();
      while (DLI != DL->end() && ADI != ActualDefs.end()) {
        assert(&*DLI == *ADI && "Not the same defs in the same order");
        ++DLI;
        ++ADI;
      }
    }
    ActualDefs.clear();
  }
}

/// Verify the domination properties of MemorySSA by checking that each
/// definition dominates all of its uses.
void MemorySSA::verifyDomination(Function &F) const {
#ifndef NDEBUG
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *MP = getMemoryAccess(&B))
      for (const Use &U : MP->uses())
        assert(dominates(MP, U) && "Memory PHI does not dominate its uses");

    for (Instruction &I : B) {
      MemoryAccess *MD = dyn_cast_or_null<MemoryDef>(getMemoryAccess(&I));
      if (!MD)
        continue;

      for (const Use &U : MD->uses())
        assert(dominates(MD, U) && "Memory Def does not dominate its uses");
    }
  }
#endif
}

/// Verify the def-use lists in MemorySSA, by verifying that \p Use
/// appears in the use list of \p Def.
void MemorySSA::verifyUseInDefs(MemoryAccess *Def, MemoryAccess *Use) const {
#ifndef NDEBUG
  // The live on entry use may cause us to get a NULL def here.
  if (!Def)
    assert(isLiveOnEntryDef(Use) &&
           "Null def but use does not point to live on entry def");
  else
    assert(is_contained(Def->users(), Use) &&
           "Did not find use in def's use list");
#endif
}

/// Verify the immediate use information, by walking all the memory
/// accesses and verifying that, for each use, it appears in the
/// appropriate def's use list.
void MemorySSA::verifyDefUses(Function &F) const {
  for (BasicBlock &B : F) {
    // Phi nodes are attached to basic blocks.
    if (MemoryPhi *Phi = getMemoryAccess(&B)) {
      assert(Phi->getNumOperands() == static_cast<unsigned>(std::distance(
                                          pred_begin(&B), pred_end(&B))) &&
             "Incomplete MemoryPhi Node");
      for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
        verifyUseInDefs(Phi->getIncomingValue(I), Phi);
        assert(find(predecessors(&B), Phi->getIncomingBlock(I)) !=
                   pred_end(&B) &&
               "Incoming phi block not a block predecessor");
      }
    }

    for (Instruction &I : B) {
      if (MemoryUseOrDef *MA = getMemoryAccess(&I)) {
        verifyUseInDefs(MA->getDefiningAccess(), MA);
      }
    }
  }
}

MemoryUseOrDef *MemorySSA::getMemoryAccess(const Instruction *I) const {
  return cast_or_null<MemoryUseOrDef>(ValueToMemoryAccess.lookup(I));
}

MemoryPhi *MemorySSA::getMemoryAccess(const BasicBlock *BB) const {
  return cast_or_null<MemoryPhi>(ValueToMemoryAccess.lookup(cast<Value>(BB)));
}

/// Perform a local numbering on blocks so that instruction ordering can be
/// determined in constant time.
/// TODO: We currently just number in order. If we numbered by N, we could
/// allow at least N-1 sequences of insertBefore or insertAfter (and at least
/// log2(N) sequences of mixed before and after) without needing to invalidate
/// the numbering.
void MemorySSA::renumberBlock(const BasicBlock *B) const {
  // The pre-increment ensures the numbers really start at 1.
  unsigned long CurrentNumber = 0;
  const AccessList *AL = getBlockAccesses(B);
  assert(AL != nullptr && "Asking to renumber an empty block");
  for (const auto &I : *AL)
    BlockNumbering[&I] = ++CurrentNumber;
  BlockNumberingValid.insert(B);
}
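
// For illustration, a block whose access list is
//   1 = MemoryPhi(...), MemoryUse(1), 2 = MemoryDef(1)
// is numbered 1, 2, 3 in that order, so locallyDominates below can compare
// two accesses in the same block with a single pair of map lookups.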

/// Determine, for two memory accesses in the same block,
/// whether \p Dominator dominates \p Dominatee.
/// \returns True if \p Dominator dominates \p Dominatee.
bool MemorySSA::locallyDominates(const MemoryAccess *Dominator,
                                 const MemoryAccess *Dominatee) const {
  const BasicBlock *DominatorBlock = Dominator->getBlock();

  assert((DominatorBlock == Dominatee->getBlock()) &&
         "Asking for local domination when accesses are in different blocks!");
  // A node dominates itself.
  if (Dominatee == Dominator)
    return true;

  // When Dominatee is defined on function entry, it is not dominated by another
  // memory access.
  if (isLiveOnEntryDef(Dominatee))
    return false;

  // When Dominator is defined on function entry, it dominates the other memory
  // access.
  if (isLiveOnEntryDef(Dominator))
    return true;

  if (!BlockNumberingValid.count(DominatorBlock))
    renumberBlock(DominatorBlock);

  unsigned long DominatorNum = BlockNumbering.lookup(Dominator);
  // All numbers start at 1.
  assert(DominatorNum != 0 && "Block was not numbered properly");
  unsigned long DominateeNum = BlockNumbering.lookup(Dominatee);
  assert(DominateeNum != 0 && "Block was not numbered properly");
  return DominatorNum < DominateeNum;
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const MemoryAccess *Dominatee) const {
  if (Dominator == Dominatee)
    return true;

  if (isLiveOnEntryDef(Dominatee))
    return false;

  if (Dominator->getBlock() != Dominatee->getBlock())
    return DT->dominates(Dominator->getBlock(), Dominatee->getBlock());
  return locallyDominates(Dominator, Dominatee);
}

bool MemorySSA::dominates(const MemoryAccess *Dominator,
                          const Use &Dominatee) const {
  if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Dominatee.getUser())) {
    BasicBlock *UseBB = MP->getIncomingBlock(Dominatee);
    // The def must dominate the incoming block of the phi.
    if (UseBB != Dominator->getBlock())
      return DT->dominates(Dominator->getBlock(), UseBB);
    // If the UseBB and the DefBB are the same, compare locally.
    return locallyDominates(Dominator, cast<MemoryAccess>(Dominatee));
  }
  // If it's not a PHI node use, the normal dominates can already handle it.
  return dominates(Dominator, cast<MemoryAccess>(Dominatee.getUser()));
}
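
// As with dominatesUse above, a use in a MemoryPhi is treated as occurring at
// the end of the corresponding incoming block. For example (hypothetical
// blocks), a MemoryDef in block %a dominates the phi operand {%a, ...} of a
// phi in %b even when %a does not dominate %b itself.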

const static char LiveOnEntryStr[] = "liveOnEntry";

void MemoryAccess::print(raw_ostream &OS) const {
  switch (getValueID()) {
  case MemoryPhiVal: return static_cast<const MemoryPhi *>(this)->print(OS);
  case MemoryDefVal: return static_cast<const MemoryDef *>(this)->print(OS);
  case MemoryUseVal: return static_cast<const MemoryUse *>(this)->print(OS);
  }
  llvm_unreachable("invalid value id");
}

void MemoryDef::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();

  auto printID = [&OS](MemoryAccess *A) {
    if (A && A->getID())
      OS << A->getID();
    else
      OS << LiveOnEntryStr;
  };

  OS << getID() << " = MemoryDef(";
  printID(UO);
  OS << ")";

  if (isOptimized()) {
    OS << "->";
    printID(getOptimized());

    if (Optional<AliasResult> AR = getOptimizedAccessType())
      OS << " " << *AR;
  }
}
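
// Sample output (illustrative): "2 = MemoryDef(1)", or, once optimized,
// "2 = MemoryDef(1)->liveOnEntry" with an optional trailing alias result
// such as "MustAlias".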

void MemoryPhi::print(raw_ostream &OS) const {
  bool First = true;
  OS << getID() << " = MemoryPhi(";
  for (const auto &Op : operands()) {
    BasicBlock *BB = getIncomingBlock(Op);
    MemoryAccess *MA = cast<MemoryAccess>(Op);
    if (!First)
      OS << ',';
    else
      First = false;

    OS << '{';
    if (BB->hasName())
      OS << BB->getName();
    else
      BB->printAsOperand(OS, false);
    OS << ',';
    if (unsigned ID = MA->getID())
      OS << ID;
    else
      OS << LiveOnEntryStr;
    OS << '}';
  }
  OS << ')';
}
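
// Sample output (illustrative): "3 = MemoryPhi({entry,liveOnEntry},{loop,2})".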

void MemoryUse::print(raw_ostream &OS) const {
  MemoryAccess *UO = getDefiningAccess();
  OS << "MemoryUse(";
  if (UO && UO->getID())
    OS << UO->getID();
  else
    OS << LiveOnEntryStr;
  OS << ')';

  if (Optional<AliasResult> AR = getOptimizedAccessType())
    OS << " " << *AR;
}
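
// Sample output (illustrative): "MemoryUse(2)" or "MemoryUse(liveOnEntry)",
// optionally followed by an alias result such as "MayAlias".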

void MemoryAccess::dump() const {
// Cannot completely remove virtual function even in release mode.
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  print(dbgs());
  dbgs() << "\n";
#endif
}

char MemorySSAPrinterLegacyPass::ID = 0;

MemorySSAPrinterLegacyPass::MemorySSAPrinterLegacyPass() : FunctionPass(ID) {
  initializeMemorySSAPrinterLegacyPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAPrinterLegacyPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<MemorySSAWrapperPass>();
}

bool MemorySSAPrinterLegacyPass::runOnFunction(Function &F) {
  auto &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
  MSSA.print(dbgs());
  if (VerifyMemorySSA)
    MSSA.verifyMemorySSA();
  return false;
}
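
// Typical invocation from the command line (illustrative):
//   opt -print-memoryssa -verify-memoryssa -disable-output input.ll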

AnalysisKey MemorySSAAnalysis::Key;

MemorySSAAnalysis::Result MemorySSAAnalysis::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AA = AM.getResult<AAManager>(F);
  return MemorySSAAnalysis::Result(llvm::make_unique<MemorySSA>(F, &AA, &DT));
}

PreservedAnalyses MemorySSAPrinterPass::run(Function &F,
                                            FunctionAnalysisManager &AM) {
  OS << "MemorySSA for function: " << F.getName() << "\n";
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().print(OS);

  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySSAVerifierPass::run(Function &F,
                                             FunctionAnalysisManager &AM) {
  AM.getResult<MemorySSAAnalysis>(F).getMSSA().verifyMemorySSA();

  return PreservedAnalyses::all();
}
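
// Under the new pass manager these are reachable via opt's -passes pipeline,
// e.g. (registered names assumed from PassRegistry.def):
//   opt -passes='print<memoryssa>' -disable-output input.ll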

char MemorySSAWrapperPass::ID = 0;

MemorySSAWrapperPass::MemorySSAWrapperPass() : FunctionPass(ID) {
  initializeMemorySSAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void MemorySSAWrapperPass::releaseMemory() { MSSA.reset(); }

void MemorySSAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<AAResultsWrapperPass>();
}

bool MemorySSAWrapperPass::runOnFunction(Function &F) {
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
  MSSA.reset(new MemorySSA(F, &AA, &DT));
  return false;
}

void MemorySSAWrapperPass::verifyAnalysis() const { MSSA->verifyMemorySSA(); }

void MemorySSAWrapperPass::print(raw_ostream &OS, const Module *M) const {
  MSSA->print(OS);
}

MemorySSAWalker::MemorySSAWalker(MemorySSA *M) : MSSA(M) {}

MemorySSA::CachingWalker::CachingWalker(MemorySSA *M, AliasAnalysis *A,
                                        DominatorTree *D)
    : MemorySSAWalker(M), Walker(*M, *A, *D) {}

void MemorySSA::CachingWalker::invalidateInfo(MemoryAccess *MA) {
  if (auto *MUD = dyn_cast<MemoryUseOrDef>(MA))
    MUD->resetOptimized();
}

/// Walk the use-def chains starting at \p MA and find
/// the MemoryAccess that actually clobbers Loc.
///
/// \returns our clobbering memory access
MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, UpwardsMemoryQuery &Q) {
  return Walker.findClobber(StartingAccess, Q);
}

MemoryAccess *MemorySSA::CachingWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &Loc) {
  if (isa<MemoryPhi>(StartingAccess))
    return StartingAccess;

  auto *StartingUseOrDef = cast<MemoryUseOrDef>(StartingAccess);
  if (MSSA->isLiveOnEntryDef(StartingUseOrDef))
    return StartingUseOrDef;

  Instruction *I = StartingUseOrDef->getMemoryInst();

  // Conservatively, fences are always clobbers, so don't perform the walk if we
  // hit a fence.
  if (!ImmutableCallSite(I) && I->isFenceLike())
    return StartingUseOrDef;

  UpwardsMemoryQuery Q;
  Q.OriginalAccess = StartingUseOrDef;
  Q.StartingLoc = Loc;
  Q.Inst = I;
  Q.IsCall = false;

  // Unlike the other function, do not walk to the def of a def, because we are
  // handed something we already believe is the clobbering access.
  MemoryAccess *DefiningAccess = isa<MemoryUse>(StartingUseOrDef)
                                     ? StartingUseOrDef->getDefiningAccess()
                                     : StartingUseOrDef;

  MemoryAccess *Clobber = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *StartingUseOrDef << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Clobber << "\n");
  return Clobber;
}

MemoryAccess *
MemorySSA::CachingWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  auto *StartingAccess = dyn_cast<MemoryUseOrDef>(MA);
  // If this is a MemoryPhi, we can't do anything.
  if (!StartingAccess)
    return MA;

  // If this is an already optimized use or def, return the optimized result.
  // Note: Currently, we store the optimized def result in a separate field,
  // since we can't use the defining access.
  if (StartingAccess->isOptimized())
    return StartingAccess->getOptimized();

  const Instruction *I = StartingAccess->getMemoryInst();
  UpwardsMemoryQuery Q(I, StartingAccess);
  // We can't sanely do anything with a fence, since they conservatively clobber
  // all memory, and have no locations to get pointers from to try to
  // disambiguate.
  if (!Q.IsCall && I->isFenceLike())
    return StartingAccess;

  if (isUseTriviallyOptimizableToLiveOnEntry(*MSSA->AA, I)) {
    MemoryAccess *LiveOnEntry = MSSA->getLiveOnEntryDef();
    StartingAccess->setOptimized(LiveOnEntry);
    StartingAccess->setOptimizedAccessType(None);
    return LiveOnEntry;
  }

  // Start with the thing we already think clobbers this location.
  MemoryAccess *DefiningAccess = StartingAccess->getDefiningAccess();

  // At this point, DefiningAccess may be the live on entry def.
  // If it is, we will not get a better result.
  if (MSSA->isLiveOnEntryDef(DefiningAccess)) {
    StartingAccess->setOptimized(DefiningAccess);
    StartingAccess->setOptimizedAccessType(None);
    return DefiningAccess;
  }

  MemoryAccess *Result = getClobberingMemoryAccess(DefiningAccess, Q);
  LLVM_DEBUG(dbgs() << "Starting Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *DefiningAccess << "\n");
  LLVM_DEBUG(dbgs() << "Final Memory SSA clobber for " << *I << " is ");
  LLVM_DEBUG(dbgs() << *Result << "\n");

  StartingAccess->setOptimized(Result);
  if (MSSA->isLiveOnEntryDef(Result))
    StartingAccess->setOptimizedAccessType(None);
  else if (Q.AR == MustAlias)
    StartingAccess->setOptimizedAccessType(MustAlias);

  return Result;
}
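
// A sketch of a typical query (hypothetical snippet, not part of this file):
//   MemoryUseOrDef *LoadAcc = MSSA.getMemoryAccess(TheLoad);
//   MemoryAccess *Clobber =
//       MSSA.getWalker()->getClobberingMemoryAccess(LoadAcc);
// For a load preceded by a store to the same location, Clobber is the store's
// MemoryDef; the result is cached via setOptimized() above, so repeated
// queries are constant time.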

MemoryAccess *
DoNothingMemorySSAWalker::getClobberingMemoryAccess(MemoryAccess *MA) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(MA))
    return Use->getDefiningAccess();
  return MA;
}

MemoryAccess *DoNothingMemorySSAWalker::getClobberingMemoryAccess(
    MemoryAccess *StartingAccess, const MemoryLocation &) {
  if (auto *Use = dyn_cast<MemoryUseOrDef>(StartingAccess))
    return Use->getDefiningAccess();
  return StartingAccess;
}

void MemoryPhi::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryPhi *>(Self);
}

void MemoryDef::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryDef *>(Self);
}

void MemoryUse::deleteMe(DerivedUser *Self) {
  delete static_cast<MemoryUse *>(Self);
}