//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
#define DEBUG_TYPE "basicaa"
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The maximum limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions must use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(const Value *V) {
  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V))
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr())
      // Note even if the argument is marked nocapture we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      return !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V, or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool RoundToAlign = false) {
  uint64_t Size;
  if (getObjectSize(V, Size, DL, &TLI, RoundToAlign))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //  - either rewind the pointer q to the base-address of the object in
  //    question (in this case rewind to p), or
  //  - just give up. It is up to the caller to make sure the pointer is
  //    pointing to the base address of the object.
  //
  // We go for the second option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
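///
/// For example (illustrative, not from the original source): "shl nsw i32 %x, 1"
/// decomposes to V = %x with Scale = 2 and Offset = 0, while "add nsw i32 %x, 4"
/// yields V = %x with Scale = 1 and Offset = 4.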
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the
    // variable. If we've been called recursively, the Offset bit width will be
    // greater than the constant's (the Offset is always as wide as the
    // outermost call), so we'll zext here and process any extension in the
    // isa<SExtInst> & isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {

      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
      // FALL THROUGH.
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that,
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
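///
/// For example (illustrative, not from the original source): on a target with
/// 4-byte i32, "getelementptr inbounds [10 x i32], [10 x i32]* %A, i64 0, i64 %i"
/// decomposes to base %A with BaseOffs = 0 and a single VariableGEPIndex
/// {V = %i, Scale = 4}.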
/*static*/ const Value *BasicAAResult::DecomposeGEPExpression(
    const Value *V, int64_t &BaseOffs,
    SmallVectorImpl<VariableGEPIndex> &VarIndices, bool &MaxLookupReached,
    const DataLayout &DL, AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  MaxLookupReached = false;
  SearchTimes++;

  BaseOffs = 0;
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle is a GlobalAlias.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->mayBeOverridden()) {
          V = GA->getAliasee();
          continue;
        }
      }
      return V;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      return V;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getOperand(0)->getType()->getPointerElementType()->isSized())
      return V;

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOffs/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = dyn_cast<StructType>(*GTI++)) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        BaseOffs += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        BaseOffs += DL.getTypeAllocSize(*GTI) * CIdx->getSExtValue();
        continue;
      }

      uint64_t Scale = DL.getTypeAllocSize(*GTI);
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned PointerSize = DL.getPointerSizeInBits(AS);
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      BaseOffs += IndexOffset.getSExtValue() * Scale;
      Scale *= IndexScale.getSExtValue();

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = VarIndices.size(); i != e; ++i) {
        if (VarIndices[i].V == Index && VarIndices[i].ZExtBits == ZExtBits &&
            VarIndices[i].SExtBits == SExtBits) {
          Scale += VarIndices[i].Scale;
          VarIndices.erase(VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
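      // (The shift pair below sign-extends the low PointerSize bits of Scale:
      // the left shift discards the high bits, and the arithmetic right shift
      // replicates the sign bit of the truncated value back down.)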
      if (unsigned ShiftBits = 64 - PointerSize) {
        Scale <<= ShiftBits;
        Scale = (int64_t)Scale >> ShiftBits;
      }

      if (Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits,
                                  static_cast<int64_t>(Scale)};
        VarIndices.push_back(Entry);
      }
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  MaxLookupReached = true;
  SearchLimitReached++;
  return V;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);

  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

// FIXME: This code is duplicated with MemoryLocation and should be hoisted to
// some common utility location.
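// (For reference: memset_pattern16 is a Darwin libc routine declared as
// "void memset_pattern16(void *b, const void *pattern16, size_t len)", which
// is the shape the parameter checks below verify.)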
static bool isMemsetPattern16(const Function *MS,
                              const TargetLibraryInfo &TLI) {
  if (TLI.has(LibFunc::memset_pattern16) &&
      MS->getName() == "memset_pattern16") {
    FunctionType *MemsetType = MS->getFunctionType();
    if (!MemsetType->isVarArg() && MemsetType->getNumParams() == 3 &&
        isa<PointerType>(MemsetType->getParamType(0)) &&
        isa<PointerType>(MemsetType->getParamType(1)) &&
        isa<IntegerType>(MemsetType->getParamType(2)))
      return true;
  }

  return false;
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(ImmutableCallSite CS) {
  if (CS.doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (CS.onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (CS.onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  // The AAResultBase base class has some smarts, let's use them.
  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(CS) & Min);
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);

  if (isMemsetPattern16(F, TLI))
    Min = FMRB_OnlyAccessesArgumentPointees;

  // Otherwise be conservative.
  return FunctionModRefBehavior(AAResultBase::getModRefBehavior(F) & Min);
}

ModRefInfo BasicAAResult::getArgModRefInfo(ImmutableCallSite CS,
                                           unsigned ArgIdx) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction()))
    switch (II->getIntrinsicID()) {
    default:
      break;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memmove:
      assert((ArgIdx == 0 || ArgIdx == 1) &&
             "Invalid argument index for memory intrinsic");
      return ArgIdx ? MRI_Ref : MRI_Mod;
    }

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  if (CS.getCalledFunction() &&
      isMemsetPattern16(CS.getCalledFunction(), TLI)) {
    assert((ArgIdx == 0 || ArgIdx == 1) &&
           "Invalid argument index for memset_pattern16");
    return ArgIdx ? MRI_Ref : MRI_Mod;
  }
  // FIXME: Handle memset_pattern4 and memset_pattern8 also.

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadOnly))
    return MRI_Ref;

  if (CS.paramHasAttr(ArgIdx + 1, Attribute::ReadNone))
    return MRI_NoModRef;

  return AAResultBase::getArgModRefInfo(CS, ArgIdx);
}

static bool isAssumeIntrinsic(ImmutableCallSite CS) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction());
  return II && II->getIntrinsicID() == Intrinsic::assume;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V))
    return inst->getParent()->getParent();

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(CS.getInstruction(), Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // If this is a tail call and Loc.Ptr points to a stack location, we know
  // that the tail call cannot access or modify the local stack. We cannot
  // exclude byval arguments here; these belong to the caller of the current
  // function, not to the current function, and a tail callee may reference
  // them.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(CS.getInstruction()))
      if (CI->isTailCall())
        return MRI_NoModRef;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && CS.getInstruction() != Object &&
      isNonEscapingLocalObject(Object)) {
    bool PassedAsArg = false;
    unsigned ArgNo = 0;
    for (ImmutableCallSite::arg_iterator CI = CS.arg_begin(), CE = CS.arg_end();
         CI != CE; ++CI, ++ArgNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!CS.doesNotCapture(ArgNo) && !CS.isByValArgument(ArgNo)))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking. If not, we have to
      // assume that the call could touch the pointer, even though it doesn't
      // escape.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR) {
        PassedAsArg = true;
        break;
      }
    }

    if (!PassedAsArg)
      return MRI_NoModRef;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS))
    return MRI_NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(ImmutableCallSite CS1,
                                        ImmutableCallSite CS2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isAssumeIntrinsic(CS1) || isAssumeIntrinsic(CS2))
    return MRI_NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(CS1, CS2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
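///
/// For example (illustrative, not from the original source): given %S of type
/// { i32, i32 }*, 4-byte accesses through
/// "getelementptr { i32, i32 }, { i32, i32 }* %S, i64 0, i32 0" and
/// "getelementptr { i32, i32 }, { i32, i32 }* %S, i64 0, i32 1" index disjoint
/// struct fields and are reported NoAlias.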
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            uint64_t V1Size,
                                            const GEPOperator *GEP2,
                                            uint64_t V2Size,
                                            const DataLayout &DL) {

  assert(GEP1->getPointerOperand() == GEP2->getPointerOperand() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return MayAlias;

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other
  // indices might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2 && C1 == C2)
    return MayAlias;

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and that
    // GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.

    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;
    if (isKnownNonEqual(GEP1->getOperand(GEP1->getNumOperands() - 1),
                        GEP2->getOperand(GEP2->getNumOperands() - 1),
                        DL))
      return NoAlias;
    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.

  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
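///
/// For example (illustrative, not from the original source): with
/// "%p = getelementptr i32, i32* %base, i64 1", a 4-byte access through %p and
/// a 4-byte access through %base decompose to a constant offset of 4 with no
/// variable indices, which the constant-offset logic below resolves to NoAlias.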
AliasResult BasicAAResult::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size,
                                    const AAMDNodes &V1AAInfo, const Value *V2,
                                    uint64_t V2Size, const AAMDNodes &V2AAInfo,
                                    const Value *UnderlyingV1,
                                    const Value *UnderlyingV2) {
  int64_t GEP1BaseOffset;
  bool GEP1MaxLookupReached;
  SmallVector<VariableGEPIndex, 4> GEP1VariableIndices;

  // If we have two gep instructions with must-aliasing or non-aliasing base
  // pointers, figure out if the indices of the GEPs tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize, AAMDNodes(),
                   UnderlyingV2, MemoryLocation::UnknownSize, AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        int64_t GEP2BaseOffset;
        bool GEP2MaxLookupReached;
        SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
        const Value *GEP2BasePtr =
            DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                                   GEP2MaxLookupReached, DL, &AC, DT);
        const Value *GEP1BasePtr =
            DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                                   GEP1MaxLookupReached, DL, &AC, DT);
        // DecomposeGEPExpression and GetUnderlyingObject should return the
        // same result except when DecomposeGEPExpression has no DataLayout.
        // FIXME: They always have a DataLayout so this should become an
        // assert.
        if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
          return MayAlias;
        }
        // If the max search depth is reached the result is undefined
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            GEP1VariableIndices == GEP2VariableIndices)
          return NoAlias;
        GEP1VariableIndices.clear();
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias)
      return BaseAlias;

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, DL, &AC, DT);

    int64_t GEP2BaseOffset;
    bool GEP2MaxLookupReached;
    SmallVector<VariableGEPIndex, 4> GEP2VariableIndices;
    const Value *GEP2BasePtr =
        DecomposeGEPExpression(GEP2, GEP2BaseOffset, GEP2VariableIndices,
                               GEP2MaxLookupReached, DL, &AC, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    // FIXME: They always have a DataLayout so this should become an assert.
    if (GEP1BasePtr != UnderlyingV1 || GEP2BasePtr != UnderlyingV2) {
      return MayAlias;
    }

    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand() == GEP2->getPointerOperand()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached the result is undefined
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(GEP1VariableIndices, GEP2VariableIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.

    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == MemoryLocation::UnknownSize &&
        V2Size == MemoryLocation::UnknownSize)
      return MayAlias;

    AliasResult R = aliasCheck(UnderlyingV1, MemoryLocation::UnknownSize,
                               AAMDNodes(), V2, V2Size, V2AAInfo);
    if (R != MustAlias)
      // If V2 may alias GEP base pointer, conservatively return MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "A pointer value formed from a
      // getelementptr instruction is associated with the addresses associated
      // with the first operand of the getelementptr".
      return R;

    const Value *GEP1BasePtr =
        DecomposeGEPExpression(GEP1, GEP1BaseOffset, GEP1VariableIndices,
                               GEP1MaxLookupReached, DL, &AC, DT);

    // DecomposeGEPExpression and GetUnderlyingObject should return the
    // same result except when DecomposeGEPExpression has no DataLayout.
    // FIXME: They always have a DataLayout so this should become an assert.
    if (GEP1BasePtr != UnderlyingV1) {
      return MayAlias;
    }
    // If the max search depth is reached the result is undefined
    if (GEP1MaxLookupReached)
      return MayAlias;
  }

  // In the two GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) {
    if (GEP1BaseOffset >= 0) {
      if (V2Size != MemoryLocation::UnknownSize) {
        if ((uint64_t)GEP1BaseOffset < V2Size)
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with negative index ('gep <ptr>, -1, ...').
      if (V1Size != MemoryLocation::UnknownSize &&
          V2Size != MemoryLocation::UnknownSize) {
        if (-(uint64_t)GEP1BaseOffset < V1Size)
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!GEP1VariableIndices.empty()) {
    uint64_t Modulo = 0;
    bool AllPositive = true;
    for (unsigned i = 0, e = GEP1VariableIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= (uint64_t)GEP1VariableIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = GEP1VariableIndices[i].V;

        bool SignKnownZero, SignKnownOne;
        ComputeSignBit(const_cast<Value *>(V), SignKnownZero, SignKnownOne, DL,
                       0, &AC, nullptr, DT);

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = GEP1VariableIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        int64_t Scale = GEP1VariableIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale >= 0) || (SignKnownOne && Scale < 0);
      }
    }

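    // The expression below isolates the least significant set bit of Modulo:
    // "Modulo & (Modulo - 1)" clears that bit, and XOR'ing with Modulo keeps
    // only it. The variable parts of both addresses are multiples of this
    // power of two.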
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    uint64_t ModOffset = (uint64_t)GEP1BaseOffset & (Modulo - 1);
    if (V1Size != MemoryLocation::UnknownSize &&
        V2Size != MemoryLocation::UnknownSize && ModOffset >= V2Size &&
        V1Size <= Modulo - ModOffset)
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset > 0 && V2Size <= (uint64_t)GEP1BaseOffset)
      return NoAlias;

    if (constantOffsetHeuristic(GEP1VariableIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  //
  // TODO: Returning PartialAlias instead of MayAlias is a mild hack; the
  // practical effect of this is protecting TBAA in the case of dynamic
  // indices into arrays of unions or malloc'd memory.
  return PartialAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI, uint64_t SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, uint64_t V2Size,
                                       const AAMDNodes &V2AAInfo) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for aliases between the values on corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(), SISize, SIAAInfo);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, uint64_t PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    uint64_t V2Size,
                                    const AAMDNodes &V2AAInfo) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallPtrSet<Value *, 4> UniqueSrc;
  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  for (Value *PV1 : PN->incoming_values()) {
    if (isa<PHINode>(PV1))
      // If any of the sources is itself a PHI, return MayAlias conservatively
      // to avoid compile time explosion. The worst possible case is if both
      // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
      // and 'n' are the number of PHI sources.
      return MayAlias;

    if (EnableRecPhiAnalysis)
      if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
        // Check whether the incoming value is a GEP that advances the pointer
        // result of this PHI node (e.g. in a loop). If this is the case, we
        // would recurse and always get a MayAlias. Handle this case specially
        // below.
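        // For example (illustrative, not from the original source):
        //   %p = phi i32* [ %base, %entry ], [ %p.next, %loop ]
        //   %p.next = getelementptr i32, i32* %p, i64 1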
        if (PV1GEP->getPointerOperand() == PN && PV1GEP->getNumIndices() == 1 &&
            isa<ConstantInt>(PV1GEP->idx_begin())) {
          isRecursive = true;
          continue;
        }
      }

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = MemoryLocation::UnknownSize;

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize, PNAAInfo);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, uint64_t V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      uint64_t V2Size, AAMDNodes V2AAInfo) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size == 0 || V2Size == 0)
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCasts();
  V2 = V2->stripPointerCasts();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);
  const Value *O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (CPN->getType()->getAddressSpace() == 0)
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // Most objects can't alias null.
    if ((isa<ConstantPointerNull>(O2) && isKnownNonNull(O1)) ||
        (isa<ConstantPointerNull>(O1) && isKnownNonNull(O2)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  if ((V1Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O2, V1Size, DL, TLI)) ||
      (V2Size != MemoryLocation::UnknownSize &&
       isObjectSmallerThan(O1, V2Size, DL, TLI)))
    return NoAlias;
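  // E.g. an 8-byte access cannot lie entirely within a 4-byte alloca, so in a
  // well-defined program such an access cannot be to that alloca at all.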

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
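  // Seed the cache with a conservative MayAlias; if this exact query is
  // already in flight, the insert fails and we return the existing
  // (provisional) answer instead of recursing forever.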
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI); for example, if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of the
  // accesses covers the entire object, then the accesses must overlap in
  // some way.
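  // E.g. if one pointer addresses all 8 bytes of an 8-byte global and the
  // other addresses any byte of that same global, they cannot be disjoint.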
  if (O1 == O2)
    if ((V1Size != MemoryLocation::UnknownSize &&
         isObjectSize(O1, V1Size, DL, TLI)) ||
        (V2Size != MemoryLocation::UnknownSize &&
         isObjectSize(O2, V2Size, DL, TLI)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they cannot be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
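///
/// For example, given the loop body
///   %p = phi i8* [ %base, %entry ], [ %q, %loop ]
///   %q = getelementptr i8, i8* %p, i64 1
/// a use of %q reached by looking through the phi denotes the previous
/// iteration's value of %q, so plain pointer equality would wrongly report
/// MustAlias here.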
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
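///
/// For example, subtracting Src = { %x*1 } from Dest = { %x*4 } leaves
/// Dest = { %x*3 }; subtracting Src = { %x*4 } removes the entry entirely,
/// and any Src index with no match in Dest is appended with its scale
/// negated.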
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    int64_t Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but GEPs almost never have more than a
    // few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale from the entry in Dest. If the
      // scale goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

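/// Heuristically try to show that two GEP-derived accesses with a common base
/// cannot overlap when their two remaining variable indices are the same
/// underlying value scaled by opposite amounts and differing only by a
/// constant offset (e.g. zext(%x) versus zext(%x + 1)): if both access sizes
/// fit strictly inside the minimum possible distance between the two
/// pointers, the accesses are disjoint.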
bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices, uint64_t V1Size,
    uint64_t V2Size, int64_t BaseOffset, AssumptionCache *AC,
    DominatorTree *DT) {
  if (VarIndices.size() != 2 || V1Size == MemoryLocation::UnknownSize ||
      V2Size == MemoryLocation::UnknownSize)
    return false;

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true, NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  uint64_t MinDiffBytes = MinDiff.getZExtValue() * std::abs(Var0.Scale);

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2, GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return V1Size + std::abs(BaseOffset) <= MinDiffBytes &&
         V2Size + std::abs(BaseOffset) <= MinDiffBytes;
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

char BasicAA::PassID;

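/// Note that the DominatorTree and LoopInfo are requested only if already
/// cached: BasicAA can run without them, they merely sharpen its results.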
BasicAAResult BasicAA::run(Function &F, AnalysisManager<Function> *AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       AM->getResult<TargetLibraryAnalysis>(F),
                       AM->getResult<AssumptionAnalysis>(F),
                       AM->getCachedResult<DominatorTreeAnalysis>(F),
                       AM->getCachedResult<LoopAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;
void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), TLIWP.getTLI(),
                                 ACT.getAssumptionCache(F),
                                 DTWP ? &DTWP->getDomTree() : nullptr,
                                 LIWP ? &LIWP->getLoopInfo() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(),
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}
