//===- MemorySSA.h - Build Memory SSA ---------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// \file
// \brief This file exposes an interface to building/using memory SSA to
// walk memory instructions using a use/def graph.
//
// The MemorySSA class builds an SSA form that links together memory access
// instructions such as loads, stores, atomics, and calls. Additionally, it
// does a trivial form of "heap versioning": every time the memory state
// changes in the program, we generate a new heap version. It generates
// MemoryDef/Uses/Phis that are overlaid on top of the existing instructions.
//
// As a trivial example,
// define i32 @main() #0 {
// entry:
//   %call = call noalias i8* @_Znwm(i64 4) #2
//   %0 = bitcast i8* %call to i32*
//   %call1 = call noalias i8* @_Znwm(i64 4) #2
//   %1 = bitcast i8* %call1 to i32*
//   store i32 5, i32* %0, align 4
//   store i32 7, i32* %1, align 4
//   %2 = load i32* %0, align 4
//   %3 = load i32* %1, align 4
//   %add = add nsw i32 %2, %3
//   ret i32 %add
// }
//
// will become
// define i32 @main() #0 {
// entry:
//   ; 1 = MemoryDef(0)
//   %call = call noalias i8* @_Znwm(i64 4) #3
//   %2 = bitcast i8* %call to i32*
//   ; 2 = MemoryDef(1)
//   %call1 = call noalias i8* @_Znwm(i64 4) #3
//   %4 = bitcast i8* %call1 to i32*
//   ; 3 = MemoryDef(2)
//   store i32 5, i32* %2, align 4
//   ; 4 = MemoryDef(3)
//   store i32 7, i32* %4, align 4
//   ; MemoryUse(3)
//   %7 = load i32* %2, align 4
//   ; MemoryUse(4)
//   %8 = load i32* %4, align 4
//   %add = add nsw i32 %7, %8
//   ret i32 %add
// }
//
// Given this form, all the stores that could ever affect the load at %8 can
// be found by taking the MemoryUse associated with it and walking from use to
// def until you hit the top of the function.
//
// Each def also has a list of users associated with it, so you can walk from
// both def to users, and users to defs. Note that we disambiguate MemoryUses,
// but not the RHS of MemoryDefs. You can see this above at %7, which would
// otherwise be a MemoryUse(4). Being disambiguated means that for a given
// store, all the MemoryUses on its use lists are may-aliases of that store
// (but the MemoryDefs on its use list may not be).
//
// MemoryDefs are not disambiguated because that would require multiple
// reaching definitions, which would require multiple phis, and multiple
// MemoryAccesses per instruction.
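//
// As an illustrative sketch (hypothetical variable names), finding the
// nearest dominating def of a load might look like the following, given a
// built MemorySSA instance MSSA and a LoadInst *LI:
//
//   auto *MU = cast<MemoryUseOrDef>(MSSA.getMemoryAccess(LI));
//   MemoryAccess *Def = MU->getDefiningAccess();
//   // Def is the nearest reaching MemoryDef/MemoryPhi; to see every store
//   // that may affect the load, keep walking defs (branching at phis) until
//   // the live-on-entry def is reached.
//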
//===----------------------------------------------------------------------===//

#ifndef LLVM_TRANSFORMS_UTILS_MEMORYSSA_H
#define LLVM_TRANSFORMS_UTILS_MEMORYSSA_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/PHITransAddr.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/PassAnalysisSupport.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <iterator>
#include <memory>
#include <utility>

namespace llvm {

class DominatorTree;
class Function;
class Instruction;
class MemoryAccess;
class LLVMContext;
class raw_ostream;

template <class T> class memoryaccess_def_iterator_base;
using memoryaccess_def_iterator = memoryaccess_def_iterator_base<MemoryAccess>;
using const_memoryaccess_def_iterator =
    memoryaccess_def_iterator_base<const MemoryAccess>;

// \brief The base for all memory accesses. All memory accesses in a block are
// linked together using an intrusive list.
class MemoryAccess : public User, public ilist_node<MemoryAccess> {
  void *operator new(size_t, unsigned) = delete;
  void *operator new(size_t) = delete;

public:
  // Methods for support type inquiry through isa, cast, and
  // dyn_cast
  static inline bool classof(const MemoryAccess *) { return true; }
  static inline bool classof(const Value *V) {
    unsigned ID = V->getValueID();
    return ID == MemoryUseVal || ID == MemoryPhiVal || ID == MemoryDefVal;
  }

  ~MemoryAccess() override;

  BasicBlock *getBlock() const { return Block; }

  virtual void print(raw_ostream &OS) const = 0;
  virtual void dump() const;

  /// \brief The user iterators for a memory access
  typedef user_iterator iterator;
  typedef const_user_iterator const_iterator;

  /// \brief This iterator walks over all of the defs in a given
  /// MemoryAccess. For MemoryPhi nodes, this walks arguments. For
  /// MemoryUse/MemoryDef, this walks the defining access.
  memoryaccess_def_iterator defs_begin();
  const_memoryaccess_def_iterator defs_begin() const;
  memoryaccess_def_iterator defs_end();
  const_memoryaccess_def_iterator defs_end() const;
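
  // A minimal sketch of visiting each def of an access (hypothetical names;
  // MA is any MemoryAccess*):
  //
  //   for (auto DI = MA->defs_begin(), DE = MA->defs_end(); DI != DE; ++DI) {
  //     MemoryAccess *DefiningAccess = *DI;
  //     // For a MemoryPhi, this visits each incoming value in turn.
  //   }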

protected:
  friend class MemorySSA;
  friend class MemoryUseOrDef;
  friend class MemoryUse;
  friend class MemoryDef;
  friend class MemoryPhi;

  /// \brief Used internally to give IDs to MemoryAccesses for printing
  virtual unsigned getID() const = 0;

  MemoryAccess(LLVMContext &C, unsigned Vty, BasicBlock *BB,
               unsigned NumOperands)
      : User(Type::getVoidTy(C), Vty, nullptr, NumOperands), Block(BB) {}

private:
  MemoryAccess(const MemoryAccess &);
  void operator=(const MemoryAccess &);
  BasicBlock *Block;
};

template <>
struct ilist_traits<MemoryAccess> : public ilist_default_traits<MemoryAccess> {
  /// See details of the instruction class for why this trick works
  // FIXME: This downcast is UB. See llvm.org/PR26753.
  LLVM_NO_SANITIZE("object-size")
  MemoryAccess *createSentinel() const {
    return static_cast<MemoryAccess *>(&Sentinel);
  }

  static void destroySentinel(MemoryAccess *) {}

  MemoryAccess *provideInitialHead() const { return createSentinel(); }
  MemoryAccess *ensureHead(MemoryAccess *) const { return createSentinel(); }
  static void noteHead(MemoryAccess *, MemoryAccess *) {}

private:
  mutable ilist_half_node<MemoryAccess> Sentinel;
};

inline raw_ostream &operator<<(raw_ostream &OS, const MemoryAccess &MA) {
  MA.print(OS);
  return OS;
}

/// \brief Class that has the common methods + fields of memory uses/defs. It's
/// a little awkward to have, but there are many cases where we want either a
/// use or def, and there are many cases where uses are needed (defs aren't
/// acceptable), and vice-versa.
///
/// This class should never be instantiated directly; make a MemoryUse or
/// MemoryDef instead.
class MemoryUseOrDef : public MemoryAccess {
  void *operator new(size_t, unsigned) = delete;
  void *operator new(size_t) = delete;

public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  /// \brief Get the instruction that this MemoryUseOrDef represents.
  Instruction *getMemoryInst() const { return MemoryInst; }

  /// \brief Get the access that produces the memory state used by this Use.
  MemoryAccess *getDefiningAccess() const { return getOperand(0); }

  static inline bool classof(const MemoryUseOrDef *) { return true; }
  static inline bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal || MA->getValueID() == MemoryDefVal;
  }

protected:
  friend class MemorySSA;

  MemoryUseOrDef(LLVMContext &C, MemoryAccess *DMA, unsigned Vty,
                 Instruction *MI, BasicBlock *BB)
      : MemoryAccess(C, Vty, BB, 1), MemoryInst(MI) {
    setDefiningAccess(DMA);
  }

  void setDefiningAccess(MemoryAccess *DMA) { setOperand(0, DMA); }

private:
  Instruction *MemoryInst;
};

template <>
struct OperandTraits<MemoryUseOrDef>
    : public FixedNumOperandTraits<MemoryUseOrDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUseOrDef, MemoryAccess)

/// \brief Represents read-only accesses to memory
///
/// In particular, the set of Instructions that will be represented by
/// MemoryUse's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Ref".
class MemoryUse final : public MemoryUseOrDef {
  void *operator new(size_t, unsigned) = delete;

public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  MemoryUse(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB)
      : MemoryUseOrDef(C, DMA, MemoryUseVal, MI, BB) {}

  static inline bool classof(const MemoryUse *) { return true; }
  static inline bool classof(const Value *MA) {
    return MA->getValueID() == MemoryUseVal;
  }

  void print(raw_ostream &OS) const override;

protected:
  friend class MemorySSA;

  unsigned getID() const override {
    llvm_unreachable("MemoryUses do not have IDs");
  }
};

template <>
struct OperandTraits<MemoryUse> : public FixedNumOperandTraits<MemoryUse, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryUse, MemoryAccess)

/// \brief Represents a read-write access to memory, whether it is a must-alias
/// or a may-alias.
///
/// In particular, the set of Instructions that will be represented by
/// MemoryDef's is exactly the set of Instructions for which
/// AliasAnalysis::getModRefInfo returns "Mod" or "ModRef".
/// Note that, in order to provide def-def chains, all defs also have a use
/// associated with them. This use points to the nearest reaching
/// MemoryDef/MemoryPhi.
class MemoryDef final : public MemoryUseOrDef {
  void *operator new(size_t, unsigned) = delete;

public:
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  // allocate space for exactly one operand
  void *operator new(size_t s) { return User::operator new(s, 1); }

  MemoryDef(LLVMContext &C, MemoryAccess *DMA, Instruction *MI, BasicBlock *BB,
            unsigned Ver)
      : MemoryUseOrDef(C, DMA, MemoryDefVal, MI, BB), ID(Ver) {}

  static inline bool classof(const MemoryDef *) { return true; }
  static inline bool classof(const Value *MA) {
    return MA->getValueID() == MemoryDefVal;
  }

  void print(raw_ostream &OS) const override;

protected:
  friend class MemorySSA;

  // For debugging only. This gets used to give memory accesses pretty numbers
  // when printing them out
  unsigned getID() const override { return ID; }

private:
  const unsigned ID;
};

template <>
struct OperandTraits<MemoryDef> : public FixedNumOperandTraits<MemoryDef, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryDef, MemoryAccess)

/// \brief Represents phi nodes for memory accesses.
///
/// These have the same semantics as regular phi nodes, with the exception that
/// only one phi will ever exist in a given basic block.
/// Guaranteeing one phi per block means guaranteeing there is only ever one
/// valid reaching MemoryDef/MemoryPhi along each path to the phi node.
/// This is ensured by not allowing disambiguation of the RHS of a MemoryDef or
/// a MemoryPhi's operands.
/// That is, given
/// if (a) {
///   store %a
///   store %b
/// }
/// it *must* be transformed into
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(1)
///    store %b
/// }
/// and *not*
/// if (a) {
///    1 = MemoryDef(liveOnEntry)
///    store %a
///    2 = MemoryDef(liveOnEntry)
///    store %b
/// }
/// even if the two stores do not conflict. Otherwise, both 1 and 2 reach the
/// end of the branch, and if there are not two phi nodes, one will be
/// disconnected completely from the SSA graph below that point.
/// Because MemoryUse's do not generate new definitions, they do not have this
/// issue.
class MemoryPhi final : public MemoryAccess {
  void *operator new(size_t, unsigned) = delete;
  // allocate space for exactly zero operands
  void *operator new(size_t s) { return User::operator new(s); }

public:
  /// Provide fast operand accessors
  DECLARE_TRANSPARENT_OPERAND_ACCESSORS(MemoryAccess);

  MemoryPhi(LLVMContext &C, BasicBlock *BB, unsigned Ver, unsigned NumPreds = 0)
      : MemoryAccess(C, MemoryPhiVal, BB, 0), ID(Ver), ReservedSpace(NumPreds) {
    allocHungoffUses(ReservedSpace);
  }

  // Block iterator interface. This provides access to the list of incoming
  // basic blocks, which parallels the list of incoming values.
  typedef BasicBlock **block_iterator;
  typedef BasicBlock *const *const_block_iterator;

  block_iterator block_begin() {
    auto *Ref = reinterpret_cast<Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<block_iterator>(Ref + 1);
  }

  const_block_iterator block_begin() const {
    const auto *Ref =
        reinterpret_cast<const Use::UserRef *>(op_begin() + ReservedSpace);
    return reinterpret_cast<const_block_iterator>(Ref + 1);
  }

  block_iterator block_end() { return block_begin() + getNumOperands(); }

  const_block_iterator block_end() const {
    return block_begin() + getNumOperands();
  }

  op_range incoming_values() { return operands(); }

  const_op_range incoming_values() const { return operands(); }

  /// \brief Return the number of incoming edges
  unsigned getNumIncomingValues() const { return getNumOperands(); }

  /// \brief Return incoming value number \p I
  MemoryAccess *getIncomingValue(unsigned I) const { return getOperand(I); }
  void setIncomingValue(unsigned I, MemoryAccess *V) {
    assert(V && "PHI node got a null value!");
    setOperand(I, V);
  }
  static unsigned getOperandNumForIncomingValue(unsigned I) { return I; }
  static unsigned getIncomingValueNumForOperand(unsigned I) { return I; }

  /// \brief Return incoming basic block number \p I.
  BasicBlock *getIncomingBlock(unsigned I) const { return block_begin()[I]; }

  /// \brief Return incoming basic block corresponding
  /// to an operand of the PHI.
  BasicBlock *getIncomingBlock(const Use &U) const {
    assert(this == U.getUser() && "Iterator doesn't point to PHI's Uses?");
    return getIncomingBlock(unsigned(&U - op_begin()));
  }

  /// \brief Return incoming basic block corresponding
  /// to value use iterator.
  BasicBlock *getIncomingBlock(MemoryAccess::const_user_iterator I) const {
    return getIncomingBlock(I.getUse());
  }

  void setIncomingBlock(unsigned I, BasicBlock *BB) {
    assert(BB && "PHI node got a null basic block!");
    block_begin()[I] = BB;
  }

  /// \brief Add an incoming value to the end of the PHI list
  void addIncoming(MemoryAccess *V, BasicBlock *BB) {
    if (getNumOperands() == ReservedSpace)
      growOperands(); // Get more space!
    // Initialize some new operands.
    setNumHungOffUseOperands(getNumOperands() + 1);
    setIncomingValue(getNumOperands() - 1, V);
    setIncomingBlock(getNumOperands() - 1, BB);
  }

  /// \brief Return the first index of the specified basic
  /// block in the value list for this PHI.  Returns -1 if no instance.
  int getBasicBlockIndex(const BasicBlock *BB) const {
    for (unsigned I = 0, E = getNumOperands(); I != E; ++I)
      if (block_begin()[I] == BB)
        return I;
    return -1;
  }

  Value *getIncomingValueForBlock(const BasicBlock *BB) const {
    int Idx = getBasicBlockIndex(BB);
    assert(Idx >= 0 && "Invalid basic block argument!");
    return getIncomingValue(Idx);
  }
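
  // Iterating a phi's (incoming value, predecessor block) pairs might look
  // like the following sketch (hypothetical names; MP is a MemoryPhi*):
  //
  //   for (unsigned I = 0, E = MP->getNumIncomingValues(); I != E; ++I) {
  //     MemoryAccess *IncomingMA = MP->getIncomingValue(I);
  //     BasicBlock *Pred = MP->getIncomingBlock(I);
  //     // IncomingMA is the reaching def along the edge from Pred.
  //   }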

  static inline bool classof(const MemoryPhi *) { return true; }
  static inline bool classof(const Value *V) {
    return V->getValueID() == MemoryPhiVal;
  }

  void print(raw_ostream &OS) const override;

protected:
  friend class MemorySSA;
  /// \brief This is more complicated than the generic
  /// User::allocHungoffUses, because we have to allocate Uses for the incoming
  /// values and pointers to the incoming blocks, all in one allocation.
  void allocHungoffUses(unsigned N) {
    User::allocHungoffUses(N, /* IsPhi */ true);
  }

  /// For debugging only. This gets used to give memory accesses pretty numbers
  /// when printing them out
  unsigned getID() const final { return ID; }

private:
  // For debugging only
  const unsigned ID;
  unsigned ReservedSpace;

  /// \brief This grows the operand list in response to a push_back style of
  /// operation.  This grows the number of ops by 1.5 times.
  void growOperands() {
    unsigned E = getNumOperands();
    // 2 op PHI nodes are VERY common, so reserve at least enough for that.
    ReservedSpace = std::max(E + E / 2, 2u);
    growHungoffUses(ReservedSpace, /* IsPhi */ true);
  }
};

template <> struct OperandTraits<MemoryPhi> : public HungoffOperandTraits<2> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(MemoryPhi, MemoryAccess)

class MemorySSAWalker;

/// \brief Encapsulates MemorySSA, including all data associated with memory
/// accesses.
class MemorySSA {
public:
  MemorySSA(Function &, AliasAnalysis *, DominatorTree *);
  MemorySSA(MemorySSA &&);
  ~MemorySSA();

  MemorySSAWalker *getWalker();

  /// \brief Given a memory Mod/Ref'ing instruction, get the MemorySSA
  /// access associated with it. If passed a basic block, this gets the memory
  /// phi node that exists for that block, if there is one. Otherwise, this
  /// will get a MemoryUseOrDef.
  MemoryAccess *getMemoryAccess(const Value *) const;
  MemoryPhi *getMemoryAccess(const BasicBlock *BB) const;

  void dump() const;
  void print(raw_ostream &) const;

  /// \brief Return true if \p MA represents the live on entry value
  ///
  /// Loads and stores from pointer arguments and other global values may be
  /// defined by memory operations that do not occur in the current function, so
  /// they may be live on entry to the function. MemorySSA represents such
  /// memory state by the live on entry definition, which is guaranteed to occur
  /// before any other memory access in the function.
  inline bool isLiveOnEntryDef(const MemoryAccess *MA) const {
    return MA == LiveOnEntryDef.get();
  }

  inline MemoryAccess *getLiveOnEntryDef() const {
    return LiveOnEntryDef.get();
  }

  using AccessList = iplist<MemoryAccess>;

  /// \brief Return the list of MemoryAccess's for a given basic block.
  ///
  /// This list is not modifiable by the user.
  const AccessList *getBlockAccesses(const BasicBlock *BB) const {
    auto It = PerBlockAccesses.find(BB);
    return It == PerBlockAccesses.end() ? nullptr : It->second.get();
  }
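
  // For example, printing every access in a block might look like this
  // sketch (hypothetical names; MSSA is a built MemorySSA instance and BB a
  // block in the analyzed function):
  //
  //   if (const auto *Accesses = MSSA.getBlockAccesses(BB))
  //     for (const MemoryAccess &MA : *Accesses)
  //       MA.print(errs());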

  /// \brief Create an empty MemoryPhi in MemorySSA
  MemoryPhi *createMemoryPhi(BasicBlock *BB);

  enum InsertionPlace { Beginning, End };

  /// \brief Create a MemoryAccess in MemorySSA at a specified point in a block,
  /// with a specified clobbering definition.
  ///
  /// Returns the new MemoryAccess.
  /// This should be called when a memory instruction is created that is being
  /// used to replace an existing memory instruction. It will *not* create PHI
  /// nodes, or verify the clobbering definition. The insertion place is used
  /// solely to determine where in the memoryssa access lists the instruction
  /// will be placed. The caller is expected to keep the ordering of the access
  /// lists consistent with the ordering of the instructions.
  MemoryAccess *createMemoryAccessInBB(Instruction *I, MemoryAccess *Definition,
                                       const BasicBlock *BB,
                                       InsertionPlace Point);
  /// \brief Create a MemoryAccess in MemorySSA before or after an existing
  /// MemoryAccess.
  ///
  /// Returns the new MemoryAccess.
  /// This should be called when a memory instruction is created that is being
  /// used to replace an existing memory instruction. It will *not* create PHI
  /// nodes, or verify the clobbering definition.  The clobbering definition
  /// must be non-null.
  MemoryAccess *createMemoryAccessBefore(Instruction *I,
                                         MemoryAccess *Definition,
                                         MemoryAccess *InsertPt);
  MemoryAccess *createMemoryAccessAfter(Instruction *I,
                                        MemoryAccess *Definition,
                                        MemoryAccess *InsertPt);
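
  // For example, after inserting a new store NewSI just before the
  // instruction of an existing access InsertPt, whose clobbering definition
  // is Def, one might write (a sketch, hypothetical names):
  //
  //   MemoryAccess *NewMA =
  //       MSSA.createMemoryAccessBefore(NewSI, Def, InsertPt);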

  /// \brief Remove a MemoryAccess from MemorySSA, including updating all
  /// definitions and uses.
  /// This should be called when a memory instruction that has a MemoryAccess
  /// associated with it is erased from the program.  For example, if a store or
  /// load is simply erased (not replaced), removeMemoryAccess should be called
  /// on the MemoryAccess for that store/load.
  void removeMemoryAccess(MemoryAccess *);

  /// \brief Given two memory accesses in the same basic block, determine
  /// whether MemoryAccess \p A dominates MemoryAccess \p B.
  bool locallyDominates(const MemoryAccess *A, const MemoryAccess *B) const;

  /// \brief Verify that MemorySSA is self-consistent (i.e., definitions
  /// dominate all uses, and uses appear in the right places).  This is used by
  /// unit tests.
  void verifyMemorySSA() const;

protected:
  // Used by the Memory SSA annotator, dumpers, and wrapper pass
  friend class MemorySSAAnnotatedWriter;
  friend class MemorySSAPrinterLegacyPass;
  void verifyDefUses(Function &F) const;
  void verifyDomination(Function &F) const;
  void verifyOrdering(Function &F) const;

private:
  class CachingWalker;
  void buildMemorySSA();
  void verifyUseInDefs(MemoryAccess *, MemoryAccess *) const;
  using AccessMap = DenseMap<const BasicBlock *, std::unique_ptr<AccessList>>;

  void
  determineInsertionPoint(const SmallPtrSetImpl<BasicBlock *> &DefiningBlocks);
  void computeDomLevels(DenseMap<DomTreeNode *, unsigned> &DomLevels);
  void markUnreachableAsLiveOnEntry(BasicBlock *BB);
  bool dominatesUse(const MemoryAccess *, const MemoryAccess *) const;
  MemoryUseOrDef *createNewAccess(Instruction *);
  MemoryUseOrDef *createDefinedAccess(Instruction *, MemoryAccess *);
  MemoryAccess *findDominatingDef(BasicBlock *, enum InsertionPlace);
  void removeFromLookups(MemoryAccess *);

  MemoryAccess *renameBlock(BasicBlock *, MemoryAccess *);
  void renamePass(DomTreeNode *, MemoryAccess *IncomingVal,
                  SmallPtrSet<BasicBlock *, 16> &Visited);
  AccessList *getOrCreateAccessList(const BasicBlock *);
  AliasAnalysis *AA;
  DominatorTree *DT;
  Function &F;

  // Memory SSA mappings
  DenseMap<const Value *, MemoryAccess *> ValueToMemoryAccess;
  AccessMap PerBlockAccesses;
  std::unique_ptr<MemoryAccess> LiveOnEntryDef;

  // Memory SSA building info
  std::unique_ptr<CachingWalker> Walker;
  unsigned NextID;
};

// This pass does eager building and then printing of MemorySSA. It is used by
// the tests to be able to build, dump, and verify Memory SSA.
class MemorySSAPrinterLegacyPass : public FunctionPass {
public:
  MemorySSAPrinterLegacyPass();

  static char ID;
  bool runOnFunction(Function &) override;
  void getAnalysisUsage(AnalysisUsage &AU) const override;
};

/// An analysis that produces \c MemorySSA for a function.
///
class MemorySSAAnalysis : public AnalysisInfoMixin<MemorySSAAnalysis> {
  friend AnalysisInfoMixin<MemorySSAAnalysis>;
  static char PassID;

public:
  typedef MemorySSA Result;

  MemorySSA run(Function &F, AnalysisManager<Function> &AM);
};

/// \brief Printer pass for \c MemorySSA.
class MemorySSAPrinterPass : public PassInfoMixin<MemorySSAPrinterPass> {
  raw_ostream &OS;

public:
  explicit MemorySSAPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};

/// \brief Verifier pass for \c MemorySSA.
struct MemorySSAVerifierPass : PassInfoMixin<MemorySSAVerifierPass> {
  PreservedAnalyses run(Function &F, AnalysisManager<Function> &AM);
};

/// \brief Legacy analysis pass which computes \c MemorySSA.
class MemorySSAWrapperPass : public FunctionPass {
public:
  MemorySSAWrapperPass();

  static char ID;
  bool runOnFunction(Function &) override;
  void releaseMemory() override;
  MemorySSA &getMSSA() { return *MSSA; }
  const MemorySSA &getMSSA() const { return *MSSA; }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  void verifyAnalysis() const override;
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  std::unique_ptr<MemorySSA> MSSA;
};

/// \brief This is the generic walker interface for walkers of MemorySSA.
/// Walkers are used to further disambiguate the def-use chains MemorySSA
/// gives you, or otherwise produce better info than MemorySSA gives you.
/// In particular, while the def-use chains provide basic information, and are
/// guaranteed to give, for example, the nearest may-aliasing MemoryDef for a
/// MemoryUse as AliasAnalysis considers it, a user may want better or other
/// information. In particular, they may want to use SCEV info to further
/// disambiguate memory accesses, or they may want the nearest dominating
/// may-aliasing MemoryDef for a call or a store. This API enables a
/// standardized interface to getting and using that info.
class MemorySSAWalker {
public:
  MemorySSAWalker(MemorySSA *);
  virtual ~MemorySSAWalker() {}

  using MemoryAccessSet = SmallVector<MemoryAccess *, 8>;

  /// \brief Given a memory Mod/Ref/ModRef'ing instruction, calling this
  /// will give you the nearest dominating MemoryAccess that Mod's the location
  /// the instruction accesses (by skipping any def which AA can prove does not
  /// alias the location(s) accessed by the instruction given).
  ///
  /// Note that this will return a single access, and it must dominate the
  /// Instruction, so if an operand of a MemoryPhi node Mod's the instruction,
  /// this will return the MemoryPhi, not the operand. This means that
  /// given:
  /// if (a) {
  ///   1 = MemoryDef(liveOnEntry)
  ///   store %a
  /// } else {
  ///   2 = MemoryDef(liveOnEntry)
  ///   store %b
  /// }
  /// 3 = MemoryPhi(2, 1)
  /// MemoryUse(3)
  /// load %a
  ///
  /// calling this API on load(%a) will return the MemoryPhi, not the MemoryDef
  /// in the if (a) branch.
  virtual MemoryAccess *getClobberingMemoryAccess(const Instruction *) = 0;
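
  // A typical query might look like this sketch (hypothetical names, with
  // Walker obtained from MemorySSA::getWalker() and LI a load instruction):
  //
  //   MemoryAccess *Clobber = Walker->getClobberingMemoryAccess(LI);
  //   if (MSSA->isLiveOnEntryDef(Clobber))
  //     ; // No write in this function affects the location LI loads from.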

  /// \brief Given a potentially clobbering memory access and a new location,
  /// calling this will give you the nearest dominating clobbering MemoryAccess
  /// (by skipping non-aliasing def links).
  ///
  /// This version of the function is mainly used to disambiguate phi translated
  /// pointers, where the value of a pointer may have changed from the initial
  /// memory access. Note that this expects to be handed either a MemoryUse,
  /// or an already potentially clobbering access. Unlike the above API, if
  /// given a MemoryDef that clobbers the pointer as the starting access, it
  /// will return that MemoryDef, whereas the above would return the clobber
  /// starting from the use side of the memory def.
  virtual MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                                  MemoryLocation &) = 0;

  /// \brief Given a memory access, invalidate anything this walker knows about
  /// that access.
  /// This API is used by walkers that store information to perform basic cache
  /// invalidation.  This will be called by MemorySSA at appropriate times for
  /// the walker it uses or returns.
  virtual void invalidateInfo(MemoryAccess *) {}

protected:
  friend class MemorySSA; // For updating MSSA pointer in MemorySSA move
                          // constructor.
  MemorySSA *MSSA;
};

/// \brief A MemorySSAWalker that does no alias queries, or anything else. It
/// simply returns the links as they were constructed by the builder.
class DoNothingMemorySSAWalker final : public MemorySSAWalker {
public:
  MemoryAccess *getClobberingMemoryAccess(const Instruction *) override;
  MemoryAccess *getClobberingMemoryAccess(MemoryAccess *,
                                          MemoryLocation &) override;
};

using MemoryAccessPair = std::pair<MemoryAccess *, MemoryLocation>;
using ConstMemoryAccessPair = std::pair<const MemoryAccess *, MemoryLocation>;

/// \brief Iterator base class used to implement const and non-const iterators
/// over the defining accesses of a MemoryAccess.
template <class T>
class memoryaccess_def_iterator_base
    : public iterator_facade_base<memoryaccess_def_iterator_base<T>,
                                  std::forward_iterator_tag, T, ptrdiff_t, T *,
                                  T *> {
  using BaseT = typename memoryaccess_def_iterator_base::iterator_facade_base;

public:
  memoryaccess_def_iterator_base(T *Start) : Access(Start), ArgNo(0) {}
  memoryaccess_def_iterator_base() : Access(nullptr), ArgNo(0) {}
  bool operator==(const memoryaccess_def_iterator_base &Other) const {
    return Access == Other.Access && (!Access || ArgNo == Other.ArgNo);
  }

  // This is a bit ugly, but for MemoryPHI's, unlike PHINodes, you can't get the
  // block from the operand in constant time (In a PHINode, the uselist has
  // both, so it's just subtraction). We provide it as part of the
  // iterator to avoid callers having to linear walk to get the block.
  // If the operation becomes constant time on MemoryPHI's, this bit of
  // abstraction breaking should be removed.
  BasicBlock *getPhiArgBlock() const {
    MemoryPhi *MP = dyn_cast<MemoryPhi>(Access);
    assert(MP && "Tried to get phi arg block when not iterating over a PHI");
    return MP->getIncomingBlock(ArgNo);
  }
  typename BaseT::iterator::pointer operator*() const {
    assert(Access && "Tried to access past the end of our iterator");
    // Go to the first argument for phis, and the defining access for everything
    // else.
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access))
      return MP->getIncomingValue(ArgNo);
    return cast<MemoryUseOrDef>(Access)->getDefiningAccess();
  }
  using BaseT::operator++;
  // Note: return the base type here so the const instantiation stays
  // well-formed; memoryaccess_def_iterator only names the non-const variant.
  memoryaccess_def_iterator_base &operator++() {
    assert(Access && "Hit end of iterator");
    if (MemoryPhi *MP = dyn_cast<MemoryPhi>(Access)) {
      if (++ArgNo >= MP->getNumIncomingValues()) {
        ArgNo = 0;
        Access = nullptr;
      }
    } else {
      Access = nullptr;
    }
    return *this;
  }

private:
  T *Access;
  unsigned ArgNo;
};

inline memoryaccess_def_iterator MemoryAccess::defs_begin() {
  return memoryaccess_def_iterator(this);
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_begin() const {
  return const_memoryaccess_def_iterator(this);
}

inline memoryaccess_def_iterator MemoryAccess::defs_end() {
  return memoryaccess_def_iterator();
}

inline const_memoryaccess_def_iterator MemoryAccess::defs_end() const {
  return const_memoryaccess_def_iterator();
}

/// \brief GraphTraits for a MemoryAccess, which walks defs in the normal case,
/// and uses in the inverse case.
template <> struct GraphTraits<MemoryAccess *> {
  using NodeType = MemoryAccess;
  using ChildIteratorType = memoryaccess_def_iterator;

  static NodeType *getEntryNode(NodeType *N) { return N; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->defs_begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->defs_end();
  }
};

template <> struct GraphTraits<Inverse<MemoryAccess *>> {
  using NodeType = MemoryAccess;
  using ChildIteratorType = MemoryAccess::iterator;

  static NodeType *getEntryNode(NodeType *N) { return N; }
  static inline ChildIteratorType child_begin(NodeType *N) {
    return N->user_begin();
  }
  static inline ChildIteratorType child_end(NodeType *N) {
    return N->user_end();
  }
};
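
// With these traits, LLVM's generic graph algorithms apply directly. For
// instance, a depth-first walk over def chains might look like this sketch
// (hypothetical names; StartMA is any MemoryAccess*):
//
//   #include "llvm/ADT/DepthFirstIterator.h"
//   for (MemoryAccess *MA : depth_first(StartMA))
//     MA->print(errs());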

/// \brief Provide an iterator that walks defs, giving both the memory access,
/// and the current pointer location, updating the pointer location as it
/// changes due to phi node translation.
///
/// This iterator, while somewhat specialized, is what most clients actually
/// want when walking upwards through MemorySSA def chains. It takes a pair of
/// <MemoryAccess,MemoryLocation>, and walks defs, properly translating the
/// memory location through phi nodes for the user.
class upward_defs_iterator
    : public iterator_facade_base<upward_defs_iterator,
                                  std::forward_iterator_tag,
                                  const MemoryAccessPair> {
  using BaseT = upward_defs_iterator::iterator_facade_base;

public:
  upward_defs_iterator(const MemoryAccessPair &Info)
      : DefIterator(Info.first), Location(Info.second),
        OriginalAccess(Info.first) {
    CurrentPair.first = nullptr;

    WalkingPhi = Info.first && isa<MemoryPhi>(Info.first);
    fillInCurrentPair();
  }

  upward_defs_iterator()
      : DefIterator(), Location(), OriginalAccess(), WalkingPhi(false) {
    CurrentPair.first = nullptr;
  }

  bool operator==(const upward_defs_iterator &Other) const {
    return DefIterator == Other.DefIterator;
  }

  BaseT::iterator::reference operator*() const {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of our iterator");
    return CurrentPair;
  }

  using BaseT::operator++;
  upward_defs_iterator &operator++() {
    assert(DefIterator != OriginalAccess->defs_end() &&
           "Tried to access past the end of the iterator");
    ++DefIterator;
    if (DefIterator != OriginalAccess->defs_end())
      fillInCurrentPair();
    return *this;
  }

  BasicBlock *getPhiArgBlock() const { return DefIterator.getPhiArgBlock(); }

private:
  void fillInCurrentPair() {
    CurrentPair.first = *DefIterator;
    if (WalkingPhi && Location.Ptr) {
      PHITransAddr Translator(
          const_cast<Value *>(Location.Ptr),
          OriginalAccess->getBlock()->getModule()->getDataLayout(), nullptr);
      if (!Translator.PHITranslateValue(OriginalAccess->getBlock(),
                                        DefIterator.getPhiArgBlock(), nullptr,
                                        false))
        if (Translator.getAddr() != Location.Ptr) {
          CurrentPair.second = Location.getWithNewPtr(Translator.getAddr());
          return;
        }
    }
    CurrentPair.second = Location;
  }

  MemoryAccessPair CurrentPair;
  memoryaccess_def_iterator DefIterator;
  MemoryLocation Location;
  MemoryAccess *OriginalAccess;
  bool WalkingPhi;
};

inline upward_defs_iterator upward_defs_begin(const MemoryAccessPair &Pair) {
  return upward_defs_iterator(Pair);
}

inline upward_defs_iterator upward_defs_end() { return upward_defs_iterator(); }
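
// Walking upward defs from a (MemoryAccess, MemoryLocation) pair might look
// like this sketch (hypothetical names; StartMA is a MemoryAccess* and Loc
// the location being tracked):
//
//   for (auto I = upward_defs_begin({StartMA, Loc}), E = upward_defs_end();
//        I != E; ++I) {
//     MemoryAccess *MA = I->first;             // reaching def on this path
//     const MemoryLocation &TLoc = I->second;  // phi-translated location
//   }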

} // end namespace llvm

#endif // LLVM_TRANSFORMS_UTILS_MEMORYSSA_H