//===- AliasAnalysis.cpp - Generic Alias Analysis Interface Implementation -==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the generic AliasAnalysis interface, which is used as
// the common interface by all clients and implementations of alias analysis.
//
// This file also implements the default version of the AliasAnalysis interface
// that is to be used when no other implementation is specified. This does some
// simple tests that detect obvious cases: two different global pointers cannot
// alias, a global cannot alias a malloc, two different mallocs cannot alias,
// etc.
//
// This alias analysis implementation really isn't very good for anything, but
// it is very fast, and makes a nice clean default implementation. Because it
// handles lots of little corner cases, other, more complex, alias analysis
// implementations may choose to rely on this pass to resolve these simple and
// easy cases.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "llvm/Analysis/CFLSteensAliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ScalarEvolutionAliasAnalysis.h"
#include "llvm/Analysis/ScopedNoAliasAA.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TypeBasedAliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
using namespace llvm;

/// Allow disabling BasicAA from the AA results. This is particularly useful
/// when testing to isolate a single AA implementation.
static cl::opt<bool> DisableBasicAA("disable-basicaa", cl::Hidden,
                                    cl::init(false));

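/// Moving an AAResults transfers the list of registered AA implementations and
/// re-points each of them back at the new aggregation.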
AAResults::AAResults(AAResults &&Arg) : TLI(Arg.TLI), AAs(std::move(Arg.AAs)) {
  for (auto &AA : AAs)
    AA->setAAResults(this);
}

AAResults::~AAResults() {
  // FIXME: It would be nice to at least clear out the pointers back to this
  // aggregation here, but we end up with non-nesting lifetimes in the legacy
  // pass manager that prevent this from working. In the legacy pass manager
  // we'll end up with dangling references here in some cases.
#if 0
  for (auto &AA : AAs)
    AA->setAAResults(nullptr);
#endif
}

//===----------------------------------------------------------------------===//
// Default chaining methods
//===----------------------------------------------------------------------===//

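/// Query each registered AA implementation in turn and return the first result
/// that is more precise than MayAlias; fall back to MayAlias if none of them
/// can improve on it.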
AliasResult AAResults::alias(const MemoryLocation &LocA,
                             const MemoryLocation &LocB) {
  for (const auto &AA : AAs) {
    auto Result = AA->alias(LocA, LocB);
    if (Result != MayAlias)
      return Result;
  }
  return MayAlias;
}

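/// Return true if any registered AA implementation can prove that the location
/// refers to constant memory (or, when \p OrLocal is set, to memory local to
/// the enclosing function).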
bool AAResults::pointsToConstantMemory(const MemoryLocation &Loc,
                                       bool OrLocal) {
  for (const auto &AA : AAs)
    if (AA->pointsToConstantMemory(Loc, OrLocal))
      return true;

  return false;
}

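/// Intersect the mod/ref information that each registered AA reports for the
/// given call argument, stopping early once the result reaches NoModRef.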
ModRefInfo AAResults::getArgModRefInfo(ImmutableCallSite CS, unsigned ArgIdx) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getArgModRefInfo(CS, ArgIdx));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  return Result;
}

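/// Compute the mod/ref relationship between an arbitrary instruction and a
/// call site. If the instruction is itself a call, delegate to the
/// call-vs-call query; otherwise, conservatively report ModRef whenever the
/// call may touch the memory location the instruction accesses.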
ModRefInfo AAResults::getModRefInfo(Instruction *I, ImmutableCallSite Call) {
  // We may have two calls.
  if (auto CS = ImmutableCallSite(I)) {
    // Check if the two calls modify the same memory.
    return getModRefInfo(CS, Call);
  } else {
    // Otherwise, check if the call modifies or references the
    // location this memory access defines. The best we can say
    // is that if the call references what this instruction
    // defines, it must be clobbered by this location.
    const MemoryLocation DefLoc = MemoryLocation::get(I);
    if (getModRefInfo(Call, DefLoc) != MRI_NoModRef)
      return MRI_ModRef;
  }
  return MRI_NoModRef;
}

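/// Compute what a call site may do to a memory location by intersecting the
/// answers from every registered AA, then refining the result with the call's
/// overall memory behavior and its per-argument mod/ref information.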
ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS,
                                    const MemoryLocation &Loc) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS, Loc));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.
  auto MRB = getModRefBehavior(CS);
  if (MRB == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  if (onlyReadsMemory(MRB))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(MRB))
    Result = ModRefInfo(Result & MRI_Mod);

  if (onlyAccessesArgPointees(MRB)) {
    bool DoesAlias = false;
    ModRefInfo AllArgsMask = MRI_NoModRef;
    if (doesAccessArgPointees(MRB)) {
      for (auto AI = CS.arg_begin(), AE = CS.arg_end(); AI != AE; ++AI) {
        const Value *Arg = *AI;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned ArgIdx = std::distance(CS.arg_begin(), AI);
        MemoryLocation ArgLoc = MemoryLocation::getForArgument(CS, ArgIdx, TLI);
        AliasResult ArgAlias = alias(ArgLoc, Loc);
        if (ArgAlias != NoAlias) {
          ModRefInfo ArgMask = getArgModRefInfo(CS, ArgIdx);
          DoesAlias = true;
          AllArgsMask = ModRefInfo(AllArgsMask | ArgMask);
        }
      }
    }
    if (!DoesAlias)
      return MRI_NoModRef;
    Result = ModRefInfo(Result & AllArgsMask);
  }

  // If Loc is a constant memory location, the call definitely could not
  // modify the memory location.
  if ((Result & MRI_Mod) &&
      pointsToConstantMemory(Loc, /*OrLocal*/ false))
    Result = ModRefInfo(Result & ~MRI_Mod);

  return Result;
}

ModRefInfo AAResults::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  ModRefInfo Result = MRI_ModRef;

  for (const auto &AA : AAs) {
    Result = ModRefInfo(Result & AA->getModRefInfo(CS1, CS2));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == MRI_NoModRef)
      return Result;
  }

  // Try to refine the mod-ref info further using other API entry points to the
  // aggregate set of AA results.

  // If CS1 or CS2 are readnone, they don't interact.
  auto CS1B = getModRefBehavior(CS1);
  if (CS1B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  auto CS2B = getModRefBehavior(CS2);
  if (CS2B == FMRB_DoesNotAccessMemory)
    return MRI_NoModRef;

  // If they both only read from memory, there is no dependence.
  if (onlyReadsMemory(CS1B) && onlyReadsMemory(CS2B))
    return MRI_NoModRef;

  // If CS1 only reads memory, the only dependence on CS2 can be
  // from CS1 reading memory written by CS2.
  if (onlyReadsMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Ref);
  else if (doesNotReadMemory(CS1B))
    Result = ModRefInfo(Result & MRI_Mod);

  // If CS2 only accesses memory through arguments, accumulate the mod/ref
  // information from CS1's references to the memory referenced by
  // CS2's arguments.
  if (onlyAccessesArgPointees(CS2B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS2B)) {
      for (auto I = CS2.arg_begin(), E = CS2.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS2ArgIdx = std::distance(CS2.arg_begin(), I);
        auto CS2ArgLoc = MemoryLocation::getForArgument(CS2, CS2ArgIdx, TLI);

        // ArgMask indicates what CS2 might do to CS2ArgLoc, and the dependence
        // of CS1 on that location is the inverse.
        ModRefInfo ArgMask = getArgModRefInfo(CS2, CS2ArgIdx);
        if (ArgMask == MRI_Mod)
          ArgMask = MRI_ModRef;
        else if (ArgMask == MRI_Ref)
          ArgMask = MRI_Mod;

        ArgMask = ModRefInfo(ArgMask & getModRefInfo(CS1, CS2ArgLoc));

        R = ModRefInfo((R | ArgMask) & Result);
        if (R == Result)
          break;
      }
    }
    return R;
  }

  // If CS1 only accesses memory through arguments, check if CS2 references
  // any of the memory referenced by CS1's arguments. If not, return NoModRef.
  if (onlyAccessesArgPointees(CS1B)) {
    ModRefInfo R = MRI_NoModRef;
    if (doesAccessArgPointees(CS1B)) {
      for (auto I = CS1.arg_begin(), E = CS1.arg_end(); I != E; ++I) {
        const Value *Arg = *I;
        if (!Arg->getType()->isPointerTy())
          continue;
        unsigned CS1ArgIdx = std::distance(CS1.arg_begin(), I);
        auto CS1ArgLoc = MemoryLocation::getForArgument(CS1, CS1ArgIdx, TLI);

        // ArgMask indicates what CS1 might do to CS1ArgLoc; if CS1 might Mod
        // CS1ArgLoc, then we care about either a Mod or a Ref by CS2. If CS1
        // might Ref, then we care only about a Mod by CS2.
        ModRefInfo ArgMask = getArgModRefInfo(CS1, CS1ArgIdx);
        ModRefInfo ArgR = getModRefInfo(CS2, CS1ArgLoc);
        if (((ArgMask & MRI_Mod) != MRI_NoModRef &&
             (ArgR & MRI_ModRef) != MRI_NoModRef) ||
            ((ArgMask & MRI_Ref) != MRI_NoModRef &&
             (ArgR & MRI_Mod) != MRI_NoModRef))
          R = ModRefInfo((R | ArgMask) & Result);

        if (R == Result)
          break;
      }
    }
    return R;
  }

  return Result;
}

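/// Compute the overall memory behavior of a call site as the intersection of
/// what every registered AA implementation reports for it.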
FunctionModRefBehavior AAResults::getModRefBehavior(ImmutableCallSite CS) {
  FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;

  for (const auto &AA : AAs) {
    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(CS));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == FMRB_DoesNotAccessMemory)
      return Result;
  }

  return Result;
}

FunctionModRefBehavior AAResults::getModRefBehavior(const Function *F) {
  FunctionModRefBehavior Result = FMRB_UnknownModRefBehavior;

  for (const auto &AA : AAs) {
    Result = FunctionModRefBehavior(Result & AA->getModRefBehavior(F));

    // Early-exit the moment we reach the bottom of the lattice.
    if (Result == FMRB_DoesNotAccessMemory)
      return Result;
  }

  return Result;
}

//===----------------------------------------------------------------------===//
// Helper method implementation
//===----------------------------------------------------------------------===//

ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
                                    const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!L->isUnordered())
    return MRI_ModRef;

  // If the load address doesn't alias the given address, it doesn't read
  // or write the specified memory.
  if (Loc.Ptr && !alias(MemoryLocation::get(L), Loc))
    return MRI_NoModRef;

  // Otherwise, a load just reads.
  return MRI_Ref;
}

ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
                                    const MemoryLocation &Loc) {
  // Be conservative in the face of volatile/atomic.
  if (!S->isUnordered())
    return MRI_ModRef;

  if (Loc.Ptr) {
    // If the store address cannot alias the pointer in question, then the
    // specified memory cannot be modified by the store.
    if (!alias(MemoryLocation::get(S), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this store.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a store just writes.
  return MRI_Mod;
}

ModRefInfo AAResults::getModRefInfo(const VAArgInst *V,
                                    const MemoryLocation &Loc) {

  if (Loc.Ptr) {
    // If the va_arg address cannot alias the pointer in question, then the
    // specified memory cannot be accessed by the va_arg.
    if (!alias(MemoryLocation::get(V), Loc))
      return MRI_NoModRef;

    // If the pointer is a pointer to constant memory, then it could not have
    // been modified by this va_arg.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a va_arg reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchPadInst *CatchPad,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchpad.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a catchpad reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const CatchReturnInst *CatchRet,
                                    const MemoryLocation &Loc) {
  if (Loc.Ptr) {
    // If the pointer is a pointer to constant memory,
    // then it could not have been modified by this catchret.
    if (pointsToConstantMemory(Loc))
      return MRI_NoModRef;
  }

  // Otherwise, a catchret reads and writes.
  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
                                    const MemoryLocation &Loc) {
  // Acquire/Release cmpxchg has properties that matter for arbitrary addresses.
  if (isStrongerThanMonotonic(CX->getSuccessOrdering()))
    return MRI_ModRef;

  // If the cmpxchg address does not alias the location, it does not access it.
  if (Loc.Ptr && !alias(MemoryLocation::get(CX), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
                                    const MemoryLocation &Loc) {
  // Acquire/Release atomicrmw has properties that matter for arbitrary
  // addresses.
  if (isStrongerThanMonotonic(RMW->getOrdering()))
    return MRI_ModRef;

  // If the atomicrmw address does not alias the location, it does not access
  // it.
  if (Loc.Ptr && !alias(MemoryLocation::get(RMW), Loc))
    return MRI_NoModRef;

  return MRI_ModRef;
}

/// \brief Return information about whether a particular call site modifies
/// or reads the specified memory location \p MemLoc before instruction \p I
/// in a BasicBlock. An ordered basic block \p OBB can be used to speed up
/// instruction-ordering queries inside the BasicBlock containing \p I.
/// FIXME: this is really just shoring-up a deficiency in alias analysis.
/// BasicAA isn't willing to spend linear time determining whether an alloca
/// was captured before or after this particular call, while we are. However,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
                                         const MemoryLocation &MemLoc,
                                         DominatorTree *DT,
                                         OrderedBasicBlock *OBB) {
  if (!DT)
    return MRI_ModRef;

  const Value *Object =
      GetUnderlyingObject(MemLoc.Ptr, I->getModule()->getDataLayout());
  if (!isIdentifiedObject(Object) || isa<GlobalValue>(Object) ||
      isa<Constant>(Object))
    return MRI_ModRef;

  ImmutableCallSite CS(I);
  if (!CS.getInstruction() || CS.getInstruction() == Object)
    return MRI_ModRef;

  if (llvm::PointerMayBeCapturedBefore(Object, /* ReturnCaptures */ true,
                                       /* StoreCaptures */ true, I, DT,
                                       /* include Object */ true,
                                       /* OrderedBasicBlock */ OBB))
    return MRI_ModRef;

  unsigned ArgNo = 0;
  ModRefInfo R = MRI_NoModRef;
  for (auto CI = CS.data_operands_begin(), CE = CS.data_operands_end();
       CI != CE; ++CI, ++ArgNo) {
    // Only look at the no-capture or byval pointer arguments. If this
    // pointer were passed to arguments that were neither of these, then it
    // couldn't be no-capture.
    if (!(*CI)->getType()->isPointerTy() ||
        (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
      continue;

    // If this is a no-capture pointer argument, see if we can tell that it
    // is impossible to alias the pointer we're checking. If not, we have to
    // assume that the call could touch the pointer, even though it doesn't
    // escape.
    if (isNoAlias(MemoryLocation(*CI), MemoryLocation(Object)))
      continue;
    if (CS.doesNotAccessMemory(ArgNo))
      continue;
    if (CS.onlyReadsMemory(ArgNo)) {
      R = MRI_Ref;
      continue;
    }
    return MRI_ModRef;
  }
  return R;
}

/// canBasicBlockModify - Return true if it is possible for execution of the
/// specified basic block to modify the location Loc.
///
bool AAResults::canBasicBlockModify(const BasicBlock &BB,
                                    const MemoryLocation &Loc) {
  return canInstructionRangeModRef(BB.front(), BB.back(), Loc, MRI_Mod);
}

/// canInstructionRangeModRef - Return true if it is possible for the
/// execution of the specified instructions to mod/ref (according to the
/// mode) the location Loc. The instructions to consider are all
/// of the instructions in the range of [I1,I2] INCLUSIVE.
/// I1 and I2 must be in the same basic block.
bool AAResults::canInstructionRangeModRef(const Instruction &I1,
                                          const Instruction &I2,
                                          const MemoryLocation &Loc,
                                          const ModRefInfo Mode) {
  assert(I1.getParent() == I2.getParent() &&
         "Instructions not in same basic block!");
  BasicBlock::const_iterator I = I1.getIterator();
  BasicBlock::const_iterator E = I2.getIterator();
  ++E; // Convert from inclusive to exclusive range.

  for (; I != E; ++I) // Check every instruction in range
    if (getModRefInfo(&*I, Loc) & Mode)
      return true;
  return false;
}

// Provide a definition for the root virtual destructor.
AAResults::Concept::~Concept() {}

// Provide a definition for the static object used to identify passes.
char AAManager::PassID;

namespace {
/// A wrapper pass for external alias analyses. This just squirrels away the
/// callback used to run any analyses and register their results.
struct ExternalAAWrapperPass : ImmutablePass {
  typedef std::function<void(Pass &, Function &, AAResults &)> CallbackT;

  CallbackT CB;

  static char ID;

  ExternalAAWrapperPass() : ImmutablePass(ID) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }
  explicit ExternalAAWrapperPass(CallbackT CB)
      : ImmutablePass(ID), CB(std::move(CB)) {
    initializeExternalAAWrapperPassPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesAll();
  }
};
}

char ExternalAAWrapperPass::ID = 0;
INITIALIZE_PASS(ExternalAAWrapperPass, "external-aa", "External Alias Analysis",
                false, true)

ImmutablePass *
llvm::createExternalAAWrapperPass(ExternalAAWrapperPass::CallbackT Callback) {
  return new ExternalAAWrapperPass(std::move(Callback));
}

AAResultsWrapperPass::AAResultsWrapperPass() : FunctionPass(ID) {
  initializeAAResultsWrapperPassPass(*PassRegistry::getPassRegistry());
}

char AAResultsWrapperPass::ID = 0;

INITIALIZE_PASS_BEGIN(AAResultsWrapperPass, "aa",
                      "Function Alias Analysis Results", false, true)
INITIALIZE_PASS_DEPENDENCY(BasicAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLAndersAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CFLSteensAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ExternalAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(SCEVAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScopedNoAliasAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TypeBasedAAWrapperPass)
INITIALIZE_PASS_END(AAResultsWrapperPass, "aa",
                    "Function Alias Analysis Results", false, true)

FunctionPass *llvm::createAAResultsWrapperPass() {
  return new AAResultsWrapperPass();
}

/// Run the wrapper pass to rebuild an aggregation over known AA passes.
///
/// This is the legacy pass manager's interface to the new-style AA results
/// aggregation object. Because this is somewhat shoe-horned into the legacy
/// pass manager, we hard code all the specific alias analyses available into
/// it. While the particular set enabled is configured via commandline flags,
/// adding a new alias analysis to LLVM will require adding support for it to
/// this list.
bool AAResultsWrapperPass::runOnFunction(Function &F) {
  // NB! This *must* be reset before adding new AA results to the new
  // AAResults object because in the legacy pass manager, each instance
  // of these will refer to the *same* immutable analyses, registering and
  // unregistering themselves with them. We need to carefully tear down the
  // previous object first, in this case replacing it with an empty one, before
  // registering new results.
  AAR.reset(
      new AAResults(getAnalysis<TargetLibraryInfoWrapperPass>().getTLI()));

  // BasicAA is always available for function analyses. Also, we add it first
  // so that it can trump TBAA results when it proves MustAlias.
  // FIXME: TBAA should have an explicit mode to support this and then we
  // should reconsider the ordering here.
  if (!DisableBasicAA)
    AAR->addAAResult(getAnalysis<BasicAAWrapperPass>().getResult());

  // Populate the results with the currently available AAs.
  if (auto *WrapperPass = getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<SCEVAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR->addAAResult(WrapperPass->getResult());

  // If available, run an external AA providing callback over the results as
  // well.
  if (auto *WrapperPass = getAnalysisIfAvailable<ExternalAAWrapperPass>())
    if (WrapperPass->CB)
      WrapperPass->CB(*this, F, *AAR);

  // Analyses don't mutate the IR, so return false.
  return false;
}

void AAResultsWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<BasicAAWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();

  // We also need to mark all the alias analysis passes we will potentially
  // probe in runOnFunction as used here to ensure the legacy pass manager
  // preserves them. This hard coding of lists of alias analyses is specific to
  // the legacy pass manager.
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<SCEVAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}

AAResults llvm::createLegacyPMAAResults(Pass &P, Function &F,
                                        BasicAAResult &BAR) {
  AAResults AAR(P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI());

  // Add in our explicitly constructed BasicAA results.
  if (!DisableBasicAA)
    AAR.addAAResult(BAR);

  // Populate the results with the other currently available AAs.
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<ScopedNoAliasAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<TypeBasedAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass =
          P.getAnalysisIfAvailable<objcarc::ObjCARCAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<GlobalsAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLAndersAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());
  if (auto *WrapperPass = P.getAnalysisIfAvailable<CFLSteensAAWrapperPass>())
    AAR.addAAResult(WrapperPass->getResult());

  return AAR;
}

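/// Return true if \p V is a call or invoke whose return value carries the
/// 'noalias' attribute (for example, a call to a malloc-like allocation
/// function).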
bool llvm::isNoAliasCall(const Value *V) {
  if (auto CS = ImmutableCallSite(V))
    return CS.paramHasAttr(0, Attribute::NoAlias);
  return false;
}

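/// Return true if \p V is a function argument carrying the 'noalias'
/// attribute.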
bool llvm::isNoAliasArgument(const Value *V) {
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr();
  return false;
}

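/// Return true if \p V refers to a distinct, identifiable object: an alloca, a
/// global (but not a GlobalAlias), a noalias call, or a noalias/byval
/// argument.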
bool llvm::isIdentifiedObject(const Value *V) {
  if (isa<AllocaInst>(V))
    return true;
  if (isa<GlobalValue>(V) && !isa<GlobalAlias>(V))
    return true;
  if (isNoAliasCall(V))
    return true;
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasNoAliasAttr() || A->hasByValAttr();
  return false;
}

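/// Return true if \p V is an identified object that is also known to be local
/// to the current function: an alloca, a noalias call, or a noalias argument.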
bool llvm::isIdentifiedFunctionLocal(const Value *V) {
  return isa<AllocaInst>(V) || isNoAliasCall(V) || isNoAliasArgument(V);
}

void llvm::getAAResultsAnalysisUsage(AnalysisUsage &AU) {
  // This function needs to be in sync with llvm::createLegacyPMAAResults -- if
  // more alias analyses are added to llvm::createLegacyPMAAResults, they need
  // to be added here also.
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<ScopedNoAliasAAWrapperPass>();
  AU.addUsedIfAvailable<TypeBasedAAWrapperPass>();
  AU.addUsedIfAvailable<objcarc::ObjCARCAAWrapperPass>();
  AU.addUsedIfAvailable<GlobalsAAWrapperPass>();
  AU.addUsedIfAvailable<CFLAndersAAWrapperPass>();
  AU.addUsedIfAvailable<CFLSteensAAWrapperPass>();
}