//===- llvm/Analysis/LoopAccessAnalysis.h -----------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the interface for the loop memory dependence framework that
// was originally developed for the Loop Vectorizer.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_ANALYSIS_LOOPACCESSANALYSIS_H
#define LLVM_ANALYSIS_LOOPACCESSANALYSIS_H

#include "llvm/ADT/EquivalenceClasses.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AliasSetTracker.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/raw_ostream.h"

namespace llvm {

class Value;
class DataLayout;
class ScalarEvolution;
class Loop;
class SCEV;
class SCEVUnionPredicate;
class LoopAccessInfo;

/// Optimization analysis message produced during vectorization. Messages inform
/// the user why vectorization did not occur.
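///
/// A minimal usage sketch, for illustration only (\p Inst, \p F, \p L and the
/// pass name are placeholders supplied by the caller):
/// \code
///   LoopAccessReport R(Inst);
///   R << "cannot identify array bounds";
///   LoopAccessReport::emitAnalysis(R, F, L, "my-pass");
/// \endcode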
class LoopAccessReport {
  std::string Message;
  const Instruction *Instr;

protected:
  LoopAccessReport(const Twine &Message, const Instruction *I)
      : Message(Message.str()), Instr(I) {}

public:
  LoopAccessReport(const Instruction *I = nullptr) : Instr(I) {}

  template <typename A> LoopAccessReport &operator<<(const A &Value) {
    raw_string_ostream Out(Message);
    Out << Value;
    return *this;
  }

  const Instruction *getInstr() const { return Instr; }

  std::string &str() { return Message; }
  const std::string &str() const { return Message; }
  operator Twine() { return Message; }

  /// \brief Emit an analysis note for \p PassName with the debug location from
  /// the instruction in \p Message if available.  Otherwise use the location of
  /// \p TheLoop.
  static void emitAnalysis(const LoopAccessReport &Message,
                           const Function *TheFunction,
                           const Loop *TheLoop,
                           const char *PassName);
};

/// \brief Collection of parameters shared between the Loop Vectorizer and the
/// Loop Access Analysis.
struct VectorizerParams {
  /// \brief Maximum SIMD width.
  static const unsigned MaxVectorWidth;

  /// \brief VF as overridden by the user.
  static unsigned VectorizationFactor;
  /// \brief Interleave factor as overridden by the user.
  static unsigned VectorizationInterleave;
  /// \brief True if force-vector-interleave was specified by the user.
  static bool isInterleaveForced();

  /// \brief When performing memory disambiguation checks at runtime do not
  /// make more than this number of comparisons.
  static unsigned RuntimeMemoryCheckThreshold;
};

/// \brief Checks memory dependences among accesses to the same underlying
/// object to determine whether vectorization is legal or not (and at
/// which vectorization factor).
///
/// Note: This class will compute a conservative dependence for accesses to
/// different underlying pointers. Clients, such as the loop vectorizer, will
/// sometimes deal with these potential dependencies by emitting runtime
/// checks.
///
/// We use the ScalarEvolution framework to symbolically evaluate pairs of
/// access functions. Since we currently don't restructure the loop we can rely
/// on the program order of memory accesses to determine their safety.
/// At the moment we will only deem accesses as safe for:
///  * A negative constant distance assuming program order.
///
///      Safe: tmp = a[i + 1];     OR     a[i + 1] = x;
///            a[i] = tmp;                y = a[i];
///
///   The latter case is safe because later checks guarantee that there can't
///   be a cycle through a phi node (that is, we check that "x" and "y" are not
///   the same variable: a header phi can only be an induction or a reduction, a
///   reduction can't have a memory sink, an induction can't have a memory
///   source). This is important and must not be violated (or we have to
///   resort to checking for cycles through memory).
///
///  * A positive constant distance assuming program order that is bigger
///    than the biggest memory access.
///
///     tmp = a[i]        OR              b[i] = x
///     a[i+2] = tmp                      y = b[i+2];
///
///     Safe distance: 2 x sizeof(a[0]), and 2 x sizeof(b[0]), respectively.
///
///  * Zero distances and all accesses have the same size.
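///
/// Clients normally reach this checker through LoopAccessInfo.  A hypothetical
/// walk over the recorded dependences might look like this (sketch; \p LAI is
/// an already-computed LoopAccessInfo for the loop):
/// \code
///   const MemoryDepChecker &DepChecker = LAI.getDepChecker();
///   if (const auto *Deps = DepChecker.getDependences())
///     for (const MemoryDepChecker::Dependence &Dep : *Deps)
///       Dep.print(dbgs(), 2, DepChecker.getMemoryInstructions());
/// \endcode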
class MemoryDepChecker {
public:
  typedef PointerIntPair<Value *, 1, bool> MemAccessInfo;
  typedef SmallPtrSet<MemAccessInfo, 8> MemAccessInfoSet;
  /// \brief Set of potential dependent memory accesses.
  typedef EquivalenceClasses<MemAccessInfo> DepCandidates;

  /// \brief Dependence between memory access instructions.
  struct Dependence {
    /// \brief The type of the dependence.
    enum DepType {
      // No dependence.
      NoDep,
      // We couldn't determine the direction or the distance.
      Unknown,
      // Lexically forward.
      //
      // FIXME: If we only have loop-independent forward dependences (e.g. a
      // read and write of A[i]), LAA will locally deem the dependence "safe"
      // without querying the MemoryDepChecker.  Therefore we can miss
      // enumerating loop-independent forward dependences in
      // getDependences.  Note that as soon as there are different
      // indices used to access the same array, the MemoryDepChecker *is*
      // queried and the dependence list is complete.
      Forward,
      // Forward, but if vectorized, is likely to prevent store-to-load
      // forwarding.
      ForwardButPreventsForwarding,
      // Lexically backward.
      Backward,
      // Backward, but the distance allows a vectorization factor of
      // MaxSafeDepDistBytes.
      BackwardVectorizable,
      // Same, but may prevent store-to-load forwarding.
      BackwardVectorizableButPreventsForwarding
    };

    /// \brief String version of the types.
    static const char *DepName[];

    /// \brief Index of the source of the dependence in the InstMap vector.
    unsigned Source;
    /// \brief Index of the destination of the dependence in the InstMap vector.
    unsigned Destination;
    /// \brief The type of the dependence.
    DepType Type;

    Dependence(unsigned Source, unsigned Destination, DepType Type)
        : Source(Source), Destination(Destination), Type(Type) {}

    /// \brief Return the source instruction of the dependence.
    Instruction *getSource(const LoopAccessInfo &LAI) const;
    /// \brief Return the destination instruction of the dependence.
    Instruction *getDestination(const LoopAccessInfo &LAI) const;

    /// \brief Dependence types that don't prevent vectorization.
    static bool isSafeForVectorization(DepType Type);

    /// \brief Lexically forward dependence.
    bool isForward() const;
    /// \brief Lexically backward dependence.
    bool isBackward() const;

    /// \brief May be a lexically backward dependence type (includes Unknown).
    bool isPossiblyBackward() const;

    /// \brief Print the dependence.  \p Instr is used to map the instruction
    /// indices to instructions.
    void print(raw_ostream &OS, unsigned Depth,
               const SmallVectorImpl<Instruction *> &Instrs) const;
  };

  MemoryDepChecker(PredicatedScalarEvolution &PSE, const Loop *L)
      : PSE(PSE), InnermostLoop(L), AccessIdx(0),
        ShouldRetryWithRuntimeCheck(false), SafeForVectorization(true),
        RecordDependences(true) {}

  /// \brief Register the location (instructions are given increasing numbers)
  /// of a write access.
  void addAccess(StoreInst *SI) {
    Value *Ptr = SI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, true)].push_back(AccessIdx);
    InstMap.push_back(SI);
    ++AccessIdx;
  }

  /// \brief Register the location (instructions are given increasing numbers)
  /// of a read access.
  void addAccess(LoadInst *LI) {
    Value *Ptr = LI->getPointerOperand();
    Accesses[MemAccessInfo(Ptr, false)].push_back(AccessIdx);
    InstMap.push_back(LI);
    ++AccessIdx;
  }

  /// \brief Check whether the dependencies between the accesses are safe.
  ///
  /// Only checks sets with elements in \p CheckDeps.
  bool areDepsSafe(DepCandidates &AccessSets, MemAccessInfoSet &CheckDeps,
                   const ValueToValueMap &Strides);

  /// \brief No memory dependence was encountered that would inhibit
  /// vectorization.
  bool isSafeForVectorization() const { return SafeForVectorization; }

  /// \brief The maximum number of bytes of a vector register we can vectorize
  /// the accesses safely with.
  uint64_t getMaxSafeDepDistBytes() { return MaxSafeDepDistBytes; }

  /// \brief In some cases when the dependency check fails we can still
  /// vectorize the loop with a dynamic array access check.
  bool shouldRetryWithRuntimeCheck() { return ShouldRetryWithRuntimeCheck; }

  /// \brief Returns the memory dependences.  If null is returned we exceeded
  /// the MaxDependences threshold and this information is not
  /// available.
  const SmallVectorImpl<Dependence> *getDependences() const {
    return RecordDependences ? &Dependences : nullptr;
  }

  void clearDependences() { Dependences.clear(); }

  /// \brief The vector of memory access instructions.  The indices are used as
  /// instruction identifiers in the Dependence class.
  const SmallVectorImpl<Instruction *> &getMemoryInstructions() const {
    return InstMap;
  }

  /// \brief Generate a mapping between the memory instructions and their
  /// indices according to program order.
  DenseMap<Instruction *, unsigned> generateInstructionOrderMap() const {
    DenseMap<Instruction *, unsigned> OrderMap;

    for (unsigned I = 0; I < InstMap.size(); ++I)
      OrderMap[InstMap[I]] = I;

    return OrderMap;
  }

  /// \brief Find the set of instructions that read or write via \p Ptr.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const;

private:
  /// A wrapper around ScalarEvolution, used to add runtime SCEV checks, and
  /// applies dynamic knowledge to simplify SCEV expressions and convert them
  /// to a more usable form. We need this in case assumptions about SCEV
  /// expressions need to be made in order to avoid unknown dependences. For
  /// example we might assume a unit stride for a pointer in order to prove
  /// that a memory access is strided and doesn't wrap.
  PredicatedScalarEvolution &PSE;
  const Loop *InnermostLoop;

  /// \brief Maps access locations (ptr, read/write) to program order.
  DenseMap<MemAccessInfo, std::vector<unsigned> > Accesses;

  /// \brief Memory access instructions in program order.
  SmallVector<Instruction *, 16> InstMap;

  /// \brief The program order index to be used for the next instruction.
  unsigned AccessIdx;

  // We can access this many bytes in parallel safely.
  uint64_t MaxSafeDepDistBytes;

  /// \brief If we see a non-constant dependence distance we can still try to
  /// vectorize this loop with runtime checks.
  bool ShouldRetryWithRuntimeCheck;

  /// \brief No memory dependence was encountered that would inhibit
  /// vectorization.
  bool SafeForVectorization;

  /// \brief True if Dependences reflects the dependences in the
  /// loop.  If false we exceeded MaxDependences and
  /// Dependences is invalid.
  bool RecordDependences;

  /// \brief Memory dependences collected during the analysis.  Only valid if
  /// RecordDependences is true.
  SmallVector<Dependence, 8> Dependences;

  /// \brief Check whether there is a plausible dependence between the two
  /// accesses.
  ///
  /// Access \p A must happen before \p B in program order. The two indices
  /// identify the index into the program order map.
  ///
  /// This function checks whether there is a plausible dependence (or the
  /// absence of such can't be proved) between the two accesses. If there is a
  /// plausible dependence but the dependence distance is bigger than one
  /// element access it records this distance in \p MaxSafeDepDistBytes (if this
  /// distance is smaller than any other distance encountered so far).
  /// Otherwise, this function returns a dependence type signaling a possible
  /// dependence.
  Dependence::DepType isDependent(const MemAccessInfo &A, unsigned AIdx,
                                  const MemAccessInfo &B, unsigned BIdx,
                                  const ValueToValueMap &Strides);

  /// \brief Check whether the data dependence could prevent store-load
  /// forwarding.
  ///
  /// \return false if we shouldn't vectorize at all or avoid larger
  /// vectorization factors by limiting MaxSafeDepDistBytes.
  bool couldPreventStoreLoadForward(uint64_t Distance, uint64_t TypeByteSize);
};

/// \brief Holds information about the memory runtime legality checks to verify
/// that a group of pointers do not overlap.
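///
/// For illustration, a client could enumerate the generated checks roughly as
/// follows (sketch; \p LAI is an already-computed LoopAccessInfo):
/// \code
///   const RuntimePointerChecking *RtChecking =
///       LAI.getRuntimePointerChecking();
///   if (RtChecking->Need)
///     for (const RuntimePointerChecking::PointerCheck &Check :
///          RtChecking->getChecks()) {
///       // Check.first and Check.second are the two pointer groups whose
///       // address ranges must be proven disjoint at run time.
///     }
/// \endcode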
class RuntimePointerChecking {
public:
  struct PointerInfo {
    /// Holds the pointer value that we need to check.
    TrackingVH<Value> PointerValue;
    /// Holds the pointer value at the beginning of the loop.
    const SCEV *Start;
    /// Holds the pointer value at the end of the loop.
    const SCEV *End;
    /// Holds whether this pointer is used for writing to memory.
    bool IsWritePtr;
    /// Holds the id of the set of pointers that could be dependent because of a
    /// shared underlying object.
    unsigned DependencySetId;
    /// Holds the id of the disjoint alias set to which this pointer belongs.
    unsigned AliasSetId;
    /// SCEV for the access.
    const SCEV *Expr;

    PointerInfo(Value *PointerValue, const SCEV *Start, const SCEV *End,
                bool IsWritePtr, unsigned DependencySetId, unsigned AliasSetId,
                const SCEV *Expr)
        : PointerValue(PointerValue), Start(Start), End(End),
          IsWritePtr(IsWritePtr), DependencySetId(DependencySetId),
          AliasSetId(AliasSetId), Expr(Expr) {}
  };

  RuntimePointerChecking(ScalarEvolution *SE) : Need(false), SE(SE) {}

  /// Reset the state of the pointer runtime information.
  void reset() {
    Need = false;
    Pointers.clear();
    Checks.clear();
  }

  /// Insert a pointer and calculate the start and end SCEVs.
  /// We need \p PSE in order to compute the SCEV expression of the pointer
  /// according to the assumptions that we've made during the analysis.
  /// The method might also version the pointer stride according to \p Strides,
  /// and add new predicates to \p PSE.
  void insert(Loop *Lp, Value *Ptr, bool WritePtr, unsigned DepSetId,
              unsigned ASId, const ValueToValueMap &Strides,
              PredicatedScalarEvolution &PSE);

  /// \brief No run-time memory checking is necessary.
  bool empty() const { return Pointers.empty(); }

  /// A grouping of pointers. A single memcheck is required between
  /// two groups.
  struct CheckingPtrGroup {
    /// \brief Create a new pointer checking group containing a single
    /// pointer, with index \p Index in RtCheck.
    CheckingPtrGroup(unsigned Index, RuntimePointerChecking &RtCheck)
        : RtCheck(RtCheck), High(RtCheck.Pointers[Index].End),
          Low(RtCheck.Pointers[Index].Start) {
      Members.push_back(Index);
    }

    /// \brief Tries to add the pointer recorded in RtCheck at index
    /// \p Index to this pointer checking group. We can only add a pointer
    /// to a checking group if we will still be able to get
    /// the upper and lower bounds of the check. Returns true in case
    /// of success, false otherwise.
    bool addPointer(unsigned Index);

    /// Constitutes the context of this pointer checking group. For each
    /// pointer that is a member of this group we will retain the index
    /// at which it appears in RtCheck.
    RuntimePointerChecking &RtCheck;
    /// The SCEV expression which represents the upper bound of all the
    /// pointers in this group.
    const SCEV *High;
    /// The SCEV expression which represents the lower bound of all the
    /// pointers in this group.
    const SCEV *Low;
    /// Indices of all the pointers that constitute this grouping.
    SmallVector<unsigned, 2> Members;
  };

  /// \brief A memcheck which is made up of a pair of grouped pointers.
  ///
  /// These *have* to be const for now, since checks are generated from
  /// CheckingPtrGroups in LAI::addRuntimeChecks which is a const member
  /// function.  FIXME: once check-generation is moved inside this class (after
  /// the PtrPartition hack is removed), we could drop const.
  typedef std::pair<const CheckingPtrGroup *, const CheckingPtrGroup *>
      PointerCheck;

  /// \brief Generate the checks and store them.  This also performs the
  /// grouping of pointers to reduce the number of memchecks necessary.
  void generateChecks(MemoryDepChecker::DepCandidates &DepCands,
                      bool UseDependencies);

  /// \brief Returns the checks that generateChecks created.
  const SmallVector<PointerCheck, 4> &getChecks() const { return Checks; }

  /// \brief Decide if we need to add a check between two groups of pointers,
  /// according to needsChecking.
  bool needsChecking(const CheckingPtrGroup &M,
                     const CheckingPtrGroup &N) const;

  /// \brief Returns the number of run-time checks required according to
  /// needsChecking.
  unsigned getNumberOfChecks() const { return Checks.size(); }

  /// \brief Print the list of run-time memory checks necessary.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// Print \p Checks.
  void printChecks(raw_ostream &OS, const SmallVectorImpl<PointerCheck> &Checks,
                   unsigned Depth = 0) const;

  /// This flag indicates if we need to add the runtime check.
  bool Need;

  /// Information about the pointers that may require checking.
  SmallVector<PointerInfo, 2> Pointers;

  /// Holds a partitioning of pointers into "check groups".
  SmallVector<CheckingPtrGroup, 2> CheckingGroups;

  /// \brief Check if pointers are in the same partition.
  ///
  /// \p PtrToPartition contains the partition number for pointers (-1 if the
  /// pointer belongs to multiple partitions).
  static bool
  arePointersInSamePartition(const SmallVectorImpl<int> &PtrToPartition,
                             unsigned PtrIdx1, unsigned PtrIdx2);

  /// \brief Decide whether we need to issue a run-time check for pointers at
  /// indices \p I and \p J to prove their independence.
  bool needsChecking(unsigned I, unsigned J) const;

  /// \brief Return PointerInfo for pointer at index \p PtrIdx.
  const PointerInfo &getPointerInfo(unsigned PtrIdx) const {
    return Pointers[PtrIdx];
  }

private:
  /// \brief Groups pointers such that a single memcheck is required
  /// between two different groups. This will clear the CheckingGroups vector
  /// and re-compute it. We will only group dependencies if \p UseDependencies
  /// is true, otherwise we will create a separate group for each pointer.
  void groupChecks(MemoryDepChecker::DepCandidates &DepCands,
                   bool UseDependencies);

  /// Generate the checks and return them.
  SmallVector<PointerCheck, 4>
  generateChecks() const;

  /// Holds a pointer to the ScalarEvolution analysis.
  ScalarEvolution *SE;

  /// \brief Set of run-time checks required to establish independence of
  /// otherwise may-aliasing pointers in the loop.
  SmallVector<PointerCheck, 4> Checks;
};

/// \brief Drive the analysis of memory accesses in the loop.
///
/// This class is responsible for analyzing the memory accesses of a loop.  It
/// collects the accesses and then its main helper the AccessAnalysis class
/// finds and categorizes the dependences in buildDependenceSets.
///
/// For memory dependences that can be analyzed at compile time, it determines
/// whether the dependence is part of a cycle inhibiting vectorization.  This
/// work is delegated to the MemoryDepChecker class.
///
/// For memory dependences that cannot be determined at compile time, it
/// generates run-time checks to prove independence.  This is done by
/// AccessAnalysis::canCheckPtrAtRT and the checks are maintained by the
/// RuntimePointerChecking class.
///
/// If pointers can wrap or can't be expressed as affine AddRec expressions by
/// ScalarEvolution, we will generate run-time checks by emitting a
/// SCEVUnionPredicate.
///
/// Checks for both memory dependences and the SCEV predicates contained in the
/// PSE must be emitted in order for the results of this analysis to be valid.
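///
/// A typical client-side flow, sketched with placeholder names (\p LAI comes
/// from LoopAccessLegacyAnalysis::getInfo or the new pass manager analysis,
/// and \p Preheader is the loop preheader where the checks would be emitted):
/// \code
///   if (!LAI.canVectorizeMemory())
///     return false; // a dependence cycle forbids vectorization
///   if (LAI.getNumRuntimePointerChecks() > 0) {
///     // Emit the overlap checks that guard the vectorized loop.
///     std::pair<Instruction *, Instruction *> FirstLastCheck =
///         LAI.addRuntimeChecks(Preheader->getTerminator());
///   }
///   // The SCEV predicate in LAI.getPSE() must also be checked at run time.
/// \endcode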
class LoopAccessInfo {
public:
  LoopAccessInfo(Loop *L, ScalarEvolution *SE, const TargetLibraryInfo *TLI,
                 AliasAnalysis *AA, DominatorTree *DT, LoopInfo *LI);

  // FIXME:
  // Hack for MSVC 2013 which seems like it can't synthesize this even
  // with default keyword:
  // LoopAccessInfo(LoopAccessInfo &&LAI) = default;
  LoopAccessInfo(LoopAccessInfo &&LAI)
      : PSE(std::move(LAI.PSE)), PtrRtChecking(std::move(LAI.PtrRtChecking)),
        DepChecker(std::move(LAI.DepChecker)), TheLoop(LAI.TheLoop),
        NumLoads(LAI.NumLoads), NumStores(LAI.NumStores),
        MaxSafeDepDistBytes(LAI.MaxSafeDepDistBytes), CanVecMem(LAI.CanVecMem),
        StoreToLoopInvariantAddress(LAI.StoreToLoopInvariantAddress),
        Report(std::move(LAI.Report)),
        SymbolicStrides(std::move(LAI.SymbolicStrides)),
        StrideSet(std::move(LAI.StrideSet)) {}
  // LoopAccessInfo &operator=(LoopAccessInfo &&LAI) = default;
  LoopAccessInfo &operator=(LoopAccessInfo &&LAI) {
    assert(this != &LAI);

    PSE = std::move(LAI.PSE);
    PtrRtChecking = std::move(LAI.PtrRtChecking);
    DepChecker = std::move(LAI.DepChecker);
    TheLoop = LAI.TheLoop;
    NumLoads = LAI.NumLoads;
    NumStores = LAI.NumStores;
    MaxSafeDepDistBytes = LAI.MaxSafeDepDistBytes;
    CanVecMem = LAI.CanVecMem;
    StoreToLoopInvariantAddress = LAI.StoreToLoopInvariantAddress;
    Report = std::move(LAI.Report);
    SymbolicStrides = std::move(LAI.SymbolicStrides);
    StrideSet = std::move(LAI.StrideSet);
    return *this;
  }

  /// Return true if we can analyze the memory accesses in the loop and there
  /// are no memory dependence cycles.
  bool canVectorizeMemory() const { return CanVecMem; }

  const RuntimePointerChecking *getRuntimePointerChecking() const {
    return PtrRtChecking.get();
  }

  /// \brief Number of memchecks required to prove independence of otherwise
  /// may-alias pointers.
  unsigned getNumRuntimePointerChecks() const {
    return PtrRtChecking->getNumberOfChecks();
  }

  /// Return true if the block BB needs to be predicated in order for the loop
  /// to be vectorized.
  static bool blockNeedsPredication(BasicBlock *BB, Loop *TheLoop,
                                    DominatorTree *DT);

  /// Returns true if the value V is uniform within the loop.
  bool isUniform(Value *V) const;

  uint64_t getMaxSafeDepDistBytes() const { return MaxSafeDepDistBytes; }
  unsigned getNumStores() const { return NumStores; }
  unsigned getNumLoads() const { return NumLoads; }

  /// \brief Add code that checks at runtime if the accessed arrays overlap.
  ///
  /// Returns a pair of instructions where the first element is the first
  /// instruction generated in possibly a sequence of instructions and the
  /// second value is the final comparator value or NULL if no check is needed.
  std::pair<Instruction *, Instruction *>
  addRuntimeChecks(Instruction *Loc) const;

  /// \brief Generate the instructions for the checks in \p PointerChecks.
  ///
  /// Returns a pair of instructions where the first element is the first
  /// instruction generated in possibly a sequence of instructions and the
  /// second value is the final comparator value or NULL if no check is needed.
  std::pair<Instruction *, Instruction *>
  addRuntimeChecks(Instruction *Loc,
                   const SmallVectorImpl<RuntimePointerChecking::PointerCheck>
                       &PointerChecks) const;

  /// \brief The diagnostics report generated for the analysis.  E.g. why we
  /// couldn't analyze the loop.
  const Optional<LoopAccessReport> &getReport() const { return Report; }

  /// \brief The Memory Dependence Checker which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  const MemoryDepChecker &getDepChecker() const { return *DepChecker; }

  /// \brief Return the list of instructions that use \p Ptr to read or write
  /// memory.
  SmallVector<Instruction *, 4> getInstructionsForAccess(Value *Ptr,
                                                         bool isWrite) const {
    return DepChecker->getInstructionsForAccess(Ptr, isWrite);
  }

  /// \brief If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  const ValueToValueMap &getSymbolicStrides() const { return SymbolicStrides; }

  /// \brief Pointer has a symbolic stride.
  bool hasStride(Value *V) const { return StrideSet.count(V); }

  /// \brief Print the information about the memory accesses in the loop.
  void print(raw_ostream &OS, unsigned Depth = 0) const;

  /// \brief Checks existence of a store to an invariant address inside the
  /// loop.  If the loop has any store to an invariant address, then it returns
  /// true, else returns false.
  bool hasStoreToLoopInvariantAddress() const {
    return StoreToLoopInvariantAddress;
  }

  /// Used to add runtime SCEV checks. Simplifies SCEV expressions and converts
  /// them to a more usable form.  All SCEV expressions during the analysis
  /// should be re-written (and therefore simplified) according to PSE.
  /// A user of LoopAccessAnalysis will need to emit the runtime checks
  /// associated with this predicate.
  const PredicatedScalarEvolution &getPSE() const { return *PSE; }

private:
  /// \brief Analyze the loop.
  void analyzeLoop(AliasAnalysis *AA, LoopInfo *LI,
                   const TargetLibraryInfo *TLI, DominatorTree *DT);

  /// \brief Check if the structure of the loop allows it to be analyzed by this
  /// pass.
  bool canAnalyzeLoop();

  void emitAnalysis(LoopAccessReport &Message);

  /// \brief Collect memory accesses with loop invariant strides.
  ///
  /// Looks for accesses like "a[i * StrideA]" where "StrideA" is loop
  /// invariant.
  void collectStridedAccess(Value *LoadOrStoreInst);

  std::unique_ptr<PredicatedScalarEvolution> PSE;

  /// We need to check that all of the pointers in this list are disjoint
  /// at runtime. Using std::unique_ptr to make using move ctor simpler.
  std::unique_ptr<RuntimePointerChecking> PtrRtChecking;

  /// \brief The Memory Dependence Checker which can determine the
  /// loop-independent and loop-carried dependences between memory accesses.
  std::unique_ptr<MemoryDepChecker> DepChecker;

  Loop *TheLoop;

  unsigned NumLoads;
  unsigned NumStores;

  uint64_t MaxSafeDepDistBytes;

  /// \brief Cache the result of analyzeLoop.
  bool CanVecMem;

  /// \brief Indicator for storing to uniform addresses.
  /// If a loop has a write to a loop invariant address then it should be true.
  bool StoreToLoopInvariantAddress;

  /// \brief The diagnostics report generated for the analysis.  E.g. why we
  /// couldn't analyze the loop.
  Optional<LoopAccessReport> Report;

  /// \brief If an access has a symbolic stride, this maps the pointer value to
  /// the stride symbol.
  ValueToValueMap SymbolicStrides;

  /// \brief Set of symbolic stride values.
  SmallPtrSet<Value *, 8> StrideSet;
};

Value *stripIntegerCast(Value *V);

/// \brief Return the SCEV corresponding to a pointer with the symbolic stride
/// replaced with constant one, assuming the SCEV predicate associated with
/// \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
///
/// If \p OrigPtr is not null, use it to look up the stride value instead of \p
/// Ptr.  \p PtrToStride provides the mapping between the pointer value and its
/// stride as collected by LoopVectorizationLegality::collectStridedAccess.
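///
/// For example, a hypothetical caller could rewrite the SCEV of \p Ptr using
/// the strides collected by an already-computed LoopAccessInfo (sketch):
/// \code
///   const SCEV *PtrScev =
///       replaceSymbolicStrideSCEV(PSE, LAI.getSymbolicStrides(), Ptr);
/// \endcode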
const SCEV *replaceSymbolicStrideSCEV(PredicatedScalarEvolution &PSE,
                                      const ValueToValueMap &PtrToStride,
                                      Value *Ptr, Value *OrigPtr = nullptr);

/// \brief If the pointer has a constant stride return it in units of its
/// element size.  Otherwise return zero.
///
/// Ensure that it does not wrap in the address space, assuming the predicate
/// associated with \p PSE is true.
///
/// If necessary this method will version the stride of the pointer according
/// to \p PtrToStride and therefore add further predicates to \p PSE.
/// The \p Assume parameter indicates if we are allowed to make additional
/// run-time assumptions.
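///
/// Illustrative use (sketch; \p Ptr is the pointer operand of a load or store
/// in loop \p Lp):
/// \code
///   // A result of 1 means a forward consecutive access, -1 a reverse
///   // consecutive access, and 0 that no constant stride could be found.
///   int64_t Stride = getPtrStride(PSE, Ptr, Lp, Strides);
/// \endcode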
int64_t getPtrStride(PredicatedScalarEvolution &PSE, Value *Ptr, const Loop *Lp,
                     const ValueToValueMap &StridesMap = ValueToValueMap(),
                     bool Assume = false);

/// \brief Returns true if the memory operations \p A and \p B are consecutive.
/// This is a simple API that does not depend on the analysis pass.
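///
/// For instance (sketch; \p LoadA and \p LoadB are loads of adjacent elements
/// such as A[i] and A[i+1]):
/// \code
///   bool Adjacent = isConsecutiveAccess(LoadA, LoadB, DL, SE);
/// \endcode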
bool isConsecutiveAccess(Value *A, Value *B, const DataLayout &DL,
                         ScalarEvolution &SE, bool CheckType = true);

/// \brief This analysis provides dependence information for the memory accesses
/// of a loop.
///
/// It runs the analysis for a loop on demand.  This can be initiated by
/// querying the loop access info via LAA::getInfo.  getInfo returns a
/// LoopAccessInfo object.  See this class for the specifics of what information
/// is provided.
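///
/// A sketch of how a legacy-pass-manager transform might query it (assuming
/// the dependence was declared in its getAnalysisUsage):
/// \code
///   const LoopAccessInfo &LAI =
///       getAnalysis<LoopAccessLegacyAnalysis>().getInfo(L);
///   bool MemorySafe = LAI.canVectorizeMemory();
/// \endcode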
class LoopAccessLegacyAnalysis : public FunctionPass {
public:
  static char ID;

  LoopAccessLegacyAnalysis() : FunctionPass(ID) {
    initializeLoopAccessLegacyAnalysisPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// \brief Query the result of the loop access information for the loop \p L.
  ///
  /// If there is no cached result available run the analysis.
  const LoopAccessInfo &getInfo(Loop *L);

  void releaseMemory() override {
    // Invalidate the cache when the pass is freed.
    LoopAccessInfoMap.clear();
  }

  /// \brief Print the result of the analysis when invoked with -analyze.
  void print(raw_ostream &OS, const Module *M = nullptr) const override;

private:
  /// \brief The cache.
  DenseMap<Loop *, std::unique_ptr<LoopAccessInfo>> LoopAccessInfoMap;

  // The used analysis passes.
  ScalarEvolution *SE;
  const TargetLibraryInfo *TLI;
  AliasAnalysis *AA;
  DominatorTree *DT;
  LoopInfo *LI;
};

/// \brief This analysis provides dependence information for the memory
/// accesses of a loop.
///
/// It runs the analysis for a loop on demand.  This can be initiated by
/// querying the loop access info via AM.getResult<LoopAccessAnalysis>.
/// getResult returns a LoopAccessInfo object.  See this class for the
/// specifics of what information is provided.
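///
/// With the new pass manager the result is obtained from the analysis manager
/// passed to a loop pass (sketch; \p AM is an AnalysisManager<Loop>):
/// \code
///   const LoopAccessInfo &LAI = AM.getResult<LoopAccessAnalysis>(L);
/// \endcode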
class LoopAccessAnalysis
    : public AnalysisInfoMixin<LoopAccessAnalysis> {
  friend AnalysisInfoMixin<LoopAccessAnalysis>;
  static char PassID;

public:
  typedef LoopAccessInfo Result;
  Result run(Loop &, AnalysisManager<Loop> &);
  static StringRef name() { return "LoopAccessAnalysis"; }
};

/// \brief Printer pass for the \c LoopAccessInfo results.
class LoopAccessInfoPrinterPass
    : public PassInfoMixin<LoopAccessInfoPrinterPass> {
  raw_ostream &OS;

public:
  explicit LoopAccessInfoPrinterPass(raw_ostream &OS) : OS(OS) {}
  PreservedAnalyses run(Loop &L, AnalysisManager<Loop> &AM);
};

inline Instruction *MemoryDepChecker::Dependence::getSource(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Source];
}

inline Instruction *MemoryDepChecker::Dependence::getDestination(
    const LoopAccessInfo &LAI) const {
  return LAI.getDepChecker().getMemoryInstructions()[Destination];
}

} // End llvm namespace

#endif