//===- AttributorAttributes.cpp - Attributes for Attributor deduction -----===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// See the Attributor.h file comment and the class descriptions in that file for
// more information.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/SCCIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/LazyValueInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Transforms/IPO/ArgumentPromotion.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

static cl::opt<bool> ManifestInternal(
    "attributor-manifest-internal", cl::Hidden,
    cl::desc("Manifest Attributor internal string attributes."),
    cl::init(false));

static cl::opt<int> MaxHeapToStackSize("max-heap-to-stack-size", cl::init(128),
                                       cl::Hidden);

template <>
unsigned llvm::PotentialConstantIntValuesState::MaxPotentialValues = 0;

static cl::opt<unsigned, true> MaxPotentialValues(
    "attributor-max-potential-values", cl::Hidden,
    cl::desc("Maximum number of potential values to be "
             "tracked for each position."),
    cl::location(llvm::PotentialConstantIntValuesState::MaxPotentialValues),
    cl::init(7));

STATISTIC(NumAAs, "Number of abstract attributes created");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side, one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
//
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
#define STATS_DECL(NAME, TYPE, MSG)                                            \
  STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
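// For illustration, STATS_DECLTRACK_ARG_ATTR(returned) expands (roughly) to:
//
//   {
//     STATISTIC(NumIRArguments_returned,
//               "Number of arguments marked 'returned'");
//     ++(NumIRArguments_returned);
//   }
//
// i.e., it declares a uniquely named statistic for the position/attribute
// combination and bumps it in place.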

// Specialization of the operator<< for abstract attributes subclasses. This
// disambiguates situations where multiple operators are applicable.
namespace llvm {
#define PIPE_OPERATOR(CLASS)                                                   \
  raw_ostream &operator<<(raw_ostream &OS, const CLASS &AA) {                  \
    return OS << static_cast<const AbstractAttribute &>(AA);                   \
  }

PIPE_OPERATOR(AAIsDead)
PIPE_OPERATOR(AANoUnwind)
PIPE_OPERATOR(AANoSync)
PIPE_OPERATOR(AANoRecurse)
PIPE_OPERATOR(AAWillReturn)
PIPE_OPERATOR(AANoReturn)
PIPE_OPERATOR(AAReturnedValues)
PIPE_OPERATOR(AANonNull)
PIPE_OPERATOR(AANoAlias)
PIPE_OPERATOR(AADereferenceable)
PIPE_OPERATOR(AAAlign)
PIPE_OPERATOR(AANoCapture)
PIPE_OPERATOR(AAValueSimplify)
PIPE_OPERATOR(AANoFree)
PIPE_OPERATOR(AAHeapToStack)
PIPE_OPERATOR(AAReachability)
PIPE_OPERATOR(AAMemoryBehavior)
PIPE_OPERATOR(AAMemoryLocation)
PIPE_OPERATOR(AAValueConstantRange)
PIPE_OPERATOR(AAPrivatizablePtr)
PIPE_OPERATOR(AAUndefinedBehavior)
PIPE_OPERATOR(AAPotentialValues)
PIPE_OPERATOR(AANoUndef)

#undef PIPE_OPERATOR
} // namespace llvm

namespace {

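/// Helper to query the Attributor for a simplified, assumed-constant value of
/// \p V and narrow it to a ConstantInt. A sketch of the result convention, as
/// read from the body below: llvm::None means simplification is still
/// pending, while a non-None nullptr means the value does not simplify to a
/// ConstantInt.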
static Optional<ConstantInt *>
getAssumedConstantInt(Attributor &A, const Value &V,
                      const AbstractAttribute &AA,
                      bool &UsedAssumedInformation) {
  Optional<Constant *> C = A.getAssumedConstant(V, AA, UsedAssumedInformation);
  if (C.hasValue())
    return dyn_cast_or_null<ConstantInt>(C.getValue());
  return llvm::None;
}

/// Get the pointer operand of a memory accessing instruction. If \p I is
/// not a memory accessing instruction, return nullptr. If \p AllowVolatile
/// is set to false and the instruction is volatile, return nullptr.
static const Value *getPointerOperand(const Instruction *I,
                                      bool AllowVolatile) {
  if (auto *LI = dyn_cast<LoadInst>(I)) {
    if (!AllowVolatile && LI->isVolatile())
      return nullptr;
    return LI->getPointerOperand();
  }

  if (auto *SI = dyn_cast<StoreInst>(I)) {
    if (!AllowVolatile && SI->isVolatile())
      return nullptr;
    return SI->getPointerOperand();
  }

  if (auto *CXI = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!AllowVolatile && CXI->isVolatile())
      return nullptr;
    return CXI->getPointerOperand();
  }

  if (auto *RMWI = dyn_cast<AtomicRMWInst>(I)) {
    if (!AllowVolatile && RMWI->isVolatile())
      return nullptr;
    return RMWI->getPointerOperand();
  }

  return nullptr;
}

/// Helper function to create a pointer of type \p ResTy, based on \p Ptr, and
/// advanced by \p Offset bytes. To aid later analysis the method tries to build
/// getelementptr instructions that traverse the natural type of \p Ptr if
/// possible. If that fails, the remaining offset is adjusted byte-wise, hence
/// through a cast to i8*.
///
/// TODO: This could probably live somewhere more prominently if it doesn't
///       already exist.
static Value *constructPointer(Type *ResTy, Value *Ptr, int64_t Offset,
                               IRBuilder<NoFolder> &IRB, const DataLayout &DL) {
  assert(Offset >= 0 && "Negative offset not supported yet!");
  LLVM_DEBUG(dbgs() << "Construct pointer: " << *Ptr << " + " << Offset
                    << "-bytes as " << *ResTy << "\n");

  // The initial type we are trying to traverse to get nice GEPs.
  Type *Ty = Ptr->getType();

  SmallVector<Value *, 4> Indices;
  std::string GEPName = Ptr->getName().str();
  while (Offset) {
    uint64_t Idx, Rem;

    if (auto *STy = dyn_cast<StructType>(Ty)) {
      const StructLayout *SL = DL.getStructLayout(STy);
      if (int64_t(SL->getSizeInBytes()) < Offset)
        break;
      Idx = SL->getElementContainingOffset(Offset);
      assert(Idx < STy->getNumElements() && "Offset calculation error!");
      Rem = Offset - SL->getElementOffset(Idx);
      Ty = STy->getElementType(Idx);
    } else if (auto *PTy = dyn_cast<PointerType>(Ty)) {
      Ty = PTy->getElementType();
      if (!Ty->isSized())
        break;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      assert(ElementSize && "Expected type with size!");
      Idx = Offset / ElementSize;
      Rem = Offset % ElementSize;
    } else {
      // Non-aggregate type, we cast and make byte-wise progress now.
      break;
    }

    LLVM_DEBUG(errs() << "Ty: " << *Ty << " Offset: " << Offset
                      << " Idx: " << Idx << " Rem: " << Rem << "\n");

    GEPName += "." + std::to_string(Idx);
    Indices.push_back(ConstantInt::get(IRB.getInt32Ty(), Idx));
    Offset = Rem;
  }

  // Create a GEP if we collected indices above.
  if (Indices.size())
    Ptr = IRB.CreateGEP(Ptr, Indices, GEPName);

  // If an offset is left we use byte-wise adjustment.
  if (Offset) {
    Ptr = IRB.CreateBitCast(Ptr, IRB.getInt8PtrTy());
    Ptr = IRB.CreateGEP(Ptr, IRB.getInt32(Offset),
                        GEPName + ".b" + Twine(Offset));
  }

  // Ensure the result has the requested type.
  Ptr = IRB.CreateBitOrPointerCast(Ptr, ResTy, Ptr->getName() + ".cast");

  LLVM_DEBUG(dbgs() << "Constructed pointer: " << *Ptr << "\n");
  return Ptr;
}
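// A small worked example of the traversal above (illustrative, not from the
// sources): for %p of type { i32, i32, i32 }* and Offset = 8, the first
// iteration strips the pointer type (Idx = 8 / 12 = 0, Rem = 8), the second
// lands in the struct element at offset 8 (Idx = 2, Rem = 0), so we emit
// roughly
//
//   %p.0.2 = getelementptr { i32, i32, i32 }, { i32, i32, i32 }* %p,
//            i32 0, i32 2
//
// and, the offset being fully consumed, only the final cast to \p ResTy
// remains; a leftover offset would instead be applied byte-wise through i8*.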

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything.
/// Stripped means that we unpacked the value associated with \p IRP at least
/// once. Note that the value used for the callback may still be the value
/// associated with \p IRP (due to PHIs). To limit how much effort is invested,
/// we will never visit more values than specified by \p MaxValues.
template <typename AAType, typename StateTy>
static bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    function_ref<bool(Value &, const Instruction *, StateTy &, bool)>
        VisitValueCB,
    const Instruction *CtxI, bool UseValueSimplify = true, int MaxValues = 16,
    function_ref<Value *(Value *)> StripCB = nullptr) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  using Item = std::pair<Value *, const Instruction *>;
  SmallSet<Item, 16> Visited;
  SmallVector<Item, 16> Worklist;
  Worklist.push_back({&IRP.getAssociatedValue(), CtxI});

  int Iteration = 0;
  do {
    Item I = Worklist.pop_back_val();
    Value *V = I.first;
    CtxI = I.second;
    if (StripCB)
      V = StripCB(V);

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(I).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      auto *CB = dyn_cast<CallBase>(V);
      if (CB && CB->getCalledFunction()) {
        for (Argument &Arg : CB->getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CB->getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back({NewV, CtxI});
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back({SI->getTrueValue(), CtxI});
      Worklist.push_back({SI->getFalseValue(), CtxI});
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (A.isAssumedDead(*IncomingBB->getTerminator(), &QueryingAA,
                            LivenessAA,
                            /* CheckBBLivenessOnly */ true)) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(
            {PHI->getIncomingValue(u), IncomingBB->getTerminator()});
      }
      continue;
    }

    if (UseValueSimplify && !isa<Constant>(V)) {
      bool UsedAssumedInformation = false;
      Optional<Constant *> C =
          A.getAssumedConstant(*V, QueryingAA, UsedAssumedInformation);
      if (!C.hasValue())
        continue;
      if (Value *NewV = C.getValue()) {
        Worklist.push_back({NewV, CtxI});
        continue;
      }
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, CtxI, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA, DepClassTy::OPTIONAL);

  // All values have been visited.
  return true;
}
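// For intuition, a hypothetical IR snippet the traversal above can look
// through:
//
//   %sel = select i1 %c, i32* %a, i32* %b
//   %phi = phi i32* [ %sel, %then ], [ %a, %else ]
//
// Starting from %phi, the worklist unpacks the phi (skipping incoming blocks
// whose terminators are assumed dead), then the select, and finally invokes
// VisitValueCB on the leaves %a and %b with the "stripped" flag set.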

const Value *stripAndAccumulateMinimalOffsets(
    Attributor &A, const AbstractAttribute &QueryingAA, const Value *Val,
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    bool UseAssumed = false) {

  auto AttributorAnalysis = [&](Value &V, APInt &ROffset) -> bool {
    const IRPosition &Pos = IRPosition::value(V);
    // Only track dependence if we are going to use the assumed info.
    const AAValueConstantRange &ValueConstantRangeAA =
        A.getAAFor<AAValueConstantRange>(QueryingAA, Pos,
                                         /* TrackDependence */ UseAssumed);
    ConstantRange Range = UseAssumed ? ValueConstantRangeAA.getAssumed()
                                     : ValueConstantRangeAA.getKnown();
    // We can only use the lower part of the range because the upper part can
    // be higher than what the value can really be.
    ROffset = Range.getSignedMin();
    return true;
  };

  return Val->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds,
                                                AttributorAnalysis);
}

static const Value *getMinimalBaseOfAccsesPointerOperand(
    Attributor &A, const AbstractAttribute &QueryingAA, const Instruction *I,
    int64_t &BytesOffset, const DataLayout &DL, bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;
  APInt OffsetAPInt(DL.getIndexTypeSizeInBits(Ptr->getType()), 0);
  const Value *Base = stripAndAccumulateMinimalOffsets(
      A, QueryingAA, Ptr, DL, OffsetAPInt, AllowNonInbounds);

  BytesOffset = OffsetAPInt.getSExtValue();
  return Base;
}

static const Value *
getBasePointerOfAccessPointerOperand(const Instruction *I, int64_t &BytesOffset,
                                     const DataLayout &DL,
                                     bool AllowNonInbounds = false) {
  const Value *Ptr = getPointerOperand(I, /* AllowVolatile */ false);
  if (!Ptr)
    return nullptr;

  return GetPointerBaseWithConstantOffset(Ptr, BytesOffset, DL,
                                          AllowNonInbounds);
}
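// Note on the two helpers above: getMinimalBaseOfAccsesPointerOperand strips
// offsets using the Attributor's (known or assumed) value ranges, so a
// variable GEP index contributes its signed minimum, whereas
// getBasePointerOfAccessPointerOperand only accumulates compile-time constant
// offsets via GetPointerBaseWithConstantOffset.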

/// Helper function to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as in, an
/// update is required to be run again).
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}
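// The common idiom in the updateImpl methods below is to funnel another AA's
// state into the current one, e.g.:
//
//   return clampStateAndIndicateChange(getState(), FnAA.getState());
//
// so the Attributor reschedules the update only if the clamp actually changed
// the assumed state.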

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << QueryingAA << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the states of all that there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AAReturnedFromReturnedValues : public BaseType {
  AAReturnedFromReturnedValues(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << QueryingAA << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call site arguments and
  // we want to join (IntegerState::operator&) the states of all that there
  // are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getCallSiteArgNo();

  auto CallSiteCheck = [&](AbstractCallSite ACS) {
    const IRPosition &ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
    // Check if a corresponding argument was found or if it is not associated
    // (which can happen for callback calls).
    if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
      return false;

    const AAType &AA = A.getAAFor<AAType>(QueryingAA, ACSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] ACS: " << *ACS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << ACSArgPos << "\n");
    const StateType &AAS = AA.getState();
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  bool AllCallSitesKnown;
  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true,
                              AllCallSitesKnown))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
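// Unlike the returned-value clamping above, this variant has to see every
// call site: the `true` passed to checkForAllCallSites requires all call
// sites to be visitable, and failure (e.g., for an externally visible
// function) collapses the state to a pessimistic fixpoint.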

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename BaseType,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public BaseType {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S(StateType::getBestState(this->getState()));
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename BaseType,
          typename StateType = typename BaseType::StateType>
struct AACallSiteReturnedFromReturned : public BaseType {
  AACallSiteReturnedFromReturned(const IRPosition &IRP, Attributor &A)
      : BaseType(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(S, AA.getState());
  }
};

/// Helper function to accumulate uses. Note that \p Uses doubles as the
/// worklist: it grows while we iterate, so its size is re-evaluated each
/// round and transitive uses discovered in the must-be-executed context are
/// processed as well.
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInContext(AAType &AA, Attributor &A,
                                MustBeExecutedContextExplorer &Explorer,
                                const Instruction *CtxI,
                                SetVector<const Use *> &Uses,
                                StateType &State) {
  auto EIt = Explorer.begin(CtxI), EEnd = Explorer.end(CtxI);
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use *U = Uses[u];
    if (const Instruction *UserI = dyn_cast<Instruction>(U->getUser())) {
      bool Found = Explorer.findInContextOf(UserI, EIt, EEnd);
      if (Found && AA.followUseInMBEC(A, U, UserI, State))
        for (const Use &Us : UserI->uses())
          Uses.insert(&Us);
    }
  }
}

/// Use the must-be-executed-context around \p I to add information into \p S.
/// The AAType class is required to have a `followUseInMBEC` method with the
/// following signature and behaviour:
///
/// bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I)
/// U - Underlying use.
/// I - The user of \p U.
/// Returns true if the value should be tracked transitively.
///
template <class AAType, typename StateType = typename AAType::StateType>
static void followUsesInMBEC(AAType &AA, Attributor &A, StateType &S,
                             Instruction &CtxI) {

  // Container for (transitive) uses of the associated value.
  SetVector<const Use *> Uses;
  for (const Use &U : AA.getIRPosition().getAssociatedValue().uses())
    Uses.insert(&U);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  followUsesInContext<AAType>(AA, A, Explorer, &CtxI, Uses, S);

  if (S.isAtFixpoint())
    return;

  SmallVector<const BranchInst *, 4> BrInsts;
  auto Pred = [&](const Instruction *I) {
    if (const BranchInst *Br = dyn_cast<BranchInst>(I))
      if (Br->isConditional())
        BrInsts.push_back(Br);
    return true;
  };

  // Here, accumulate conditional branch instructions in the context. We
  // explore the child paths and collect the known states. The disjunction of
  // those states can be merged to its own state. Let ParentState_i be a state
  // to indicate the known information for an i-th branch instruction in the
  // context. ChildStates are created for its successors respectively.
  //
  // ParentS_1 = ChildS_{1, 1} /\ ChildS_{1, 2} /\ ... /\ ChildS_{1, n_1}
  // ParentS_2 = ChildS_{2, 1} /\ ChildS_{2, 2} /\ ... /\ ChildS_{2, n_2}
  //      ...
  // ParentS_m = ChildS_{m, 1} /\ ChildS_{m, 2} /\ ... /\ ChildS_{m, n_m}
  //
  // Known State |= ParentS_1 \/ ParentS_2 \/ ... \/ ParentS_m
  //
  // FIXME: Currently, recursive branches are not handled. For example, we
  // can't deduce that ptr must be dereferenced in the function below.
  //
  // void f(int a, int b, int *ptr) {
  //    if (a)
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    else {
  //      if (b) {
  //        *ptr = 0;
  //      } else {
  //        *ptr = 1;
  //      }
  //    }
  // }

  Explorer.checkForAllContext(&CtxI, Pred);
  for (const BranchInst *Br : BrInsts) {
    StateType ParentState;

    // The known state of the parent state is a conjunction of children's
    // known states so it is initialized with a best state.
    ParentState.indicateOptimisticFixpoint();

    for (const BasicBlock *BB : Br->successors()) {
      StateType ChildState;

      size_t BeforeSize = Uses.size();
      followUsesInContext(AA, A, Explorer, &BB->front(), Uses, ChildState);

      // Erase uses which only appear in the child.
      for (auto It = Uses.begin() + BeforeSize; It != Uses.end();)
        It = Uses.erase(It);

      ParentState &= ChildState;
    }

    // Use only known state.
    S += ParentState;
  }
}

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP, Attributor &A) : AANoUnwind(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (const auto *CB = dyn_cast<CallBase>(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(*CB));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
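// In short: a function is assumed nounwind as long as every instruction that
// may throw (checked only for the unwinding-related opcodes listed above) is
// a call site whose callee is itself assumed nounwind; a single
// counterexample drops the attribute to a pessimistic fixpoint.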

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
struct AANoUnwindCallSite final : AANoUnwindImpl {
  AANoUnwindCallSite(const IRPosition &IRP, Attributor &A)
      : AANoUnwindImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUnwindImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoUnwind>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nounwind); }
};

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAReturnedValues(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration()) {
      indicatePessimisticFixpoint();
      return;
    }
    assert(!F->getReturnType()->isVoidTy() &&
           "Did not expect a void return type!");

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        if (auto *Insts = OpcodeInstMap.lookup(Instruction::Ret))
          for (Instruction *RI : *Insts)
            ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }

    if (!A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of function with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of function with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [&A](CallBase &CB, Constant &C) {
    if (CB.use_empty())
      return ChangeStatus::UNCHANGED;
    if (A.changeValueAfterManifest(CB, C))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    if (UniqueRVArg->getType()->canLosslesslyBitCastTo(
            getAssociatedFunction()->getReturnType())) {
      getIRPosition() = IRPosition::argument(*UniqueRVArg);
      Changed = IRAttribute::manifest(A);
    }
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U)) {
            Constant *RVCCast =
                CB->getType() == RVC->getType()
                    ? RVC
                    : ConstantExpr::getTruncOrBitCast(RVC, CB->getType());
            Changed = ReplaceCallSiteUsersWith(*CB, *RVCCast) | Changed;
          }
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Constant *RVCCast =
          AnchorValue.getType() == RVC->getType()
              ? RVC
              : ConstantExpr::getTruncOrBitCast(RVC, AnchorValue.getType());
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVCCast);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the saved
    // one is an undef, there is no unique returned value. Undefs are special
    // since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}
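// Illustration of the predicate above on a hypothetical function whose
// returns are `ret i32 undef` and `ret i32 %x`: the undef is ignored in favor
// of %x, so the assumed unique return value is %x. Had a second distinct
// non-undef value been returned as well, UniqueRV would have been set to
// nullptr instead.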

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
    const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, const Instruction *, RVState &RVS,
                         bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS,
                                const Instruction *CtxI) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(
        A, RetValPos, *this, RVS, VisitValueCB, CtxI,
        /* UseValueSimplify */ false);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS, &I);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls. To avoid modifications to the ReturnedValues map
  // while we iterate over it we keep a record of potential new entries in a
  // copy map, NewRVsMap.
  decltype(ReturnedValues) NewRVsMap;

  auto HandleReturnValue = [&](Value *RV,
                               SmallSetVector<ReturnInst *, 4> &RIs) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *RV << " by #"
                      << RIs.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(RV);
    if (!CB || UnresolvedCalls.count(CB))
      return;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << RetValAA << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      return;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      return;
    }

    // Now check if we can track transitively returned values. If possible,
    // i.e., if all returned values can be represented in the current scope,
    // do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      return;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      return;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the traversal
        // again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS, CB);
        continue;
      }
      if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need to
        // do anything for us.
        continue;
      }
      if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(RIs.begin(), RIs.end());
        continue;
      }
    }
  };

  for (auto &It : ReturnedValues)
    HandleReturnValue(It.first, It.second);

  // Because processing the new information can again lead to new return values
  // we have to be careful and iterate until this iteration is complete. The
  // idea is that we are in a stable state at the end of an update. All return
  // values have been handled and properly categorized. We might not update
  // again if we have not requested a non-fix attribute so we cannot "wait" for
  // the next update to analyze a new return value.
  while (!NewRVsMap.empty()) {
    auto It = std::move(NewRVsMap.back());
    NewRVsMap.pop_back();

    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        HandleReturnValue(It.first, ReturnInsts);
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAReturnedValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP, Attributor &A) : AANoSync(IRP, A) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is nosync; currently only
  /// the mem* intrinsics (memcpy, memmove, memset) are handled.
  static bool isNoSyncIntrinsic(Instruction *I);
};
1244 
bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the instruction be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}
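
// Illustrative example (not part of the original source): a monotonic
// (relaxed) atomic load is compatible with nosync, while a seq_cst load is
// non-relaxed and may participate in synchronization:
//
//   %a = load atomic i32, i32* %p monotonic, align 4  ; relaxed, nosync-safe
//   %b = load atomic i32, i32* %p seq_cst, align 4    ; non-relaxed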

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered and are
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}
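
// Illustrative example (not part of the original source): per the check
// above, only the non-volatile form of a mem* intrinsic is nosync; the last
// argument of @llvm.memcpy is the "isvolatile" flag:
//
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 false) ; nosync
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 8, i1 true)  ; not nosync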

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!isa<CallBase>(I) && "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      if (CB->hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(*CB));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !cast<CallBase>(I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}
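
// Illustrative example (not part of the original source; @f is hypothetical):
// the update above would keep the assumed "nosync" state for a function like
// the following, since its only memory access is non-volatile and non-atomic
// and it contains no convergent or synchronizing calls:
//
//   define i32 @f(i32* %p) {
//     %v = load i32, i32* %p
//     ret i32 %v
//   }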

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for call sites.
struct AANoSyncCallSite final : AANoSyncImpl {
  AANoSyncCallSite(const IRPosition &IRP, Attributor &A)
      : AANoSyncImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoSyncImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoSync>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nosync); }
};

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP, Attributor &A) : AANoFree(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(CB));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};
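
// Illustrative example (not part of the original source; @callee and @caller
// are hypothetical): @caller is deduced nofree once its only call-like
// instruction targets a callee that is known or assumed nofree:
//
//   define void @callee(i8* %p) nofree {
//     ret void
//   }
//   define void @caller(i8* %p) {
//     call void @callee(i8* %p)
//     ret void
//   }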

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for call sites.
struct AANoFreeCallSite final : AANoFreeImpl {
  AANoFreeCallSite(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoFreeImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoFree>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(nofree); }
};

/// NoFree attribute for floating values.
struct AANoFreeFloating : AANoFreeImpl {
  AANoFreeFloating(const IRPosition &IRP, Attributor &A)
      : AANoFreeImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nofree)
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    const auto &NoFreeAA =
        A.getAAFor<AANoFree>(*this, IRPosition::function_scope(IRP));
    if (NoFreeAA.isAssumedNoFree())
      return ChangeStatus::UNCHANGED;

    Value &AssociatedValue = getIRPosition().getAssociatedValue();
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (CB->isBundleOperand(&U))
          return false;
        if (!CB->isArgOperand(&U))
          return true;
        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoFreeArg = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));
        return NoFreeArg.isAssumedNoFree();
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        Follow = true;
        return true;
      }
      if (isa<ReturnInst>(UserI))
        return true;

      // Unknown user.
      return false;
    };
    if (!A.checkForAllUses(Pred, *this, AssociatedValue))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
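
// Illustrative example (not part of the original source; @use is
// hypothetical): the use-walk above transitively follows GEPs, bitcasts,
// PHIs, and selects, accepts return uses, and accepts call uses only if the
// matching callee argument is itself assumed nofree; any other user triggers
// the pessimistic fixpoint:
//
//   %g = getelementptr inbounds i8, i8* %p, i64 4 ; followed (Follow = true)
//   call void @use(i8* %g)                        ; ok iff @use's argument is
//                                                 ; assumed nofree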

/// NoFree attribute for a function argument.
struct AANoFreeArgument final : AANoFreeFloating {
  AANoFreeArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nofree) }
};

/// NoFree attribute for call site arguments.
struct AANoFreeCallSiteArgument final : AANoFreeFloating {
  AANoFreeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoFree>(*this, ArgPos);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nofree) }
};

/// NoFree attribute for function return value.
struct AANoFreeReturned final : AANoFreeFloating {
  AANoFreeReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoFree is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoFree attribute deduction for a call site return value.
struct AANoFreeCallSiteReturned final : AANoFreeFloating {
  AANoFreeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoFreeFloating(IRP, A) {}

  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }
  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nofree) }
};

/// ------------------------ NonNull Argument Attribute ------------------------
static int64_t getKnownNonNullAndDerefBytesForUse(
    Attributor &A, const AbstractAttribute &QueryingAA, Value &AssociatedValue,
    const Use *U, const Instruction *I, bool &IsNonNull, bool &TrackUse) {
  TrackUse = false;

  const Value *UseV = U->get();
  if (!UseV->getType()->isPointerTy())
    return 0;

  Type *PtrTy = UseV->getType();
  const Function *F = I->getFunction();
  bool NullPointerIsDefined =
      F ? llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()) : true;
  const DataLayout &DL = A.getInfoCache().getDL();
  if (const auto *CB = dyn_cast<CallBase>(I)) {
    if (CB->isBundleOperand(U)) {
      if (RetainedKnowledge RK = getKnowledgeFromUse(
              U, {Attribute::NonNull, Attribute::Dereferenceable})) {
        IsNonNull |=
            (RK.AttrKind == Attribute::NonNull || !NullPointerIsDefined);
        return RK.ArgValue;
      }
      return 0;
    }

    if (CB->isCallee(U)) {
      IsNonNull |= !NullPointerIsDefined;
      return 0;
    }

    unsigned ArgNo = CB->getArgOperandNo(U);
    IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
    // As long as we only use known information there is no need to track
    // dependences here.
    auto &DerefAA = A.getAAFor<AADereferenceable>(QueryingAA, IRP,
                                                  /* TrackDependence */ false);
    IsNonNull |= DerefAA.isKnownNonNull();
    return DerefAA.getKnownDereferenceableBytes();
  }

  // We need to follow common pointer manipulation uses to the accesses they
  // feed into. We can try to be smart and avoid looking through constructs we
  // do not handle well for now, e.g., non-inbounds GEPs.
  if (isa<CastInst>(I)) {
    TrackUse = true;
    return 0;
  }

  if (isa<GetElementPtrInst>(I)) {
    TrackUse = true;
    return 0;
  }

  int64_t Offset;
  const Value *Base =
      getMinimalBaseOfAccsesPointerOperand(A, QueryingAA, I, Offset, DL);
  if (Base) {
    if (Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType()) + Offset;

      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  // Corner case when the offset is 0.
  Base = getBasePointerOfAccessPointerOperand(I, Offset, DL,
                                              /*AllowNonInbounds*/ true);
  if (Base) {
    if (Offset == 0 && Base == &AssociatedValue &&
        getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
      int64_t DerefBytes =
          (int64_t)DL.getTypeStoreSize(PtrTy->getPointerElementType());
      IsNonNull |= !NullPointerIsDefined;
      return std::max(int64_t(0), DerefBytes);
    }
  }

  return 0;
}
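
// Illustrative example (not part of the original source; @f is hypothetical):
// in a function where null is not defined, a load of an i32 through the
// argument lets the helper above conclude the pointer is nonnull and
// dereferenceable for at least 4 bytes (the store size of i32) at that use:
//
//   define i32 @f(i32* %p) {
//     %v = load i32, i32* %p   ; implies nonnull + dereferenceable(4) for %p
//     ret i32 %v
//   }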

struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP, Attributor &A)
      : AANonNull(IRP, A),
        NullIsDefined(NullPointerIsDefined(
            getAnchorScope(),
            getAssociatedValue().getType()->getPointerAddressSpace())) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (!NullIsDefined &&
        hasAttr({Attribute::NonNull, Attribute::Dereferenceable},
                /* IgnoreSubsumingPositions */ false, &A)) {
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<ConstantPointerNull>(V)) {
      indicatePessimisticFixpoint();
      return;
    }

    AANonNull::initialize(A);

    bool CanBeNull = true;
    if (V.getPointerDereferenceableBytes(A.getDataLayout(), CanBeNull)) {
      if (!CanBeNull) {
        indicateOptimisticFixpoint();
        return;
      }
    }

    if (isa<GlobalValue>(&getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANonNull::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    getKnownNonNullAndDerefBytesForUse(A, *this, getAssociatedValue(), U, I,
                                       IsNonNull, TrackUse);
    State.setKnown(IsNonNull);
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// Flag to determine if the underlying value can be null and still allow
  /// valid accesses.
  const bool NullIsDefined;
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : public AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP, Attributor &A)
      : AANonNullImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (const Function *Fn = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*Fn);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*Fn);
    }

    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANonNull::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, AC, CtxI, DT))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS = AA.getState();
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANonNull, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNull> {
  AANonNullReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANonNull, AANonNull>(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site argument.
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANonNullFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
/// ------------------------ No-Recurse Attributes ----------------------------

struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP, Attributor &A) : AANoRecurse(IRP, A) {}

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    if (const Function *F = getAnchorScope())
      if (A.getInfoCache().getSccSize(*F) != 1)
        indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    // If all live call sites are known to be no-recurse, we are as well.
    auto CallSitePred = [&](AbstractCallSite ACS) {
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(
          *this, IRPosition::function(*ACS.getInstruction()->getFunction()),
          /* TrackDependence */ false, DepClassTy::OPTIONAL);
      return NoRecurseAA.isKnownNoRecurse();
    };
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(CallSitePred, *this, true, AllCallSitesKnown)) {
      // If we know all call sites and all are known no-recurse, we are done.
      // If all known call sites, which might not be all that exist, are known
      // to be no-recurse, we are not done but we can continue to assume
      // no-recurse. If one of the call sites we have not visited will become
      // live, another update is triggered.
      if (AllCallSitesKnown)
        indicateOptimisticFixpoint();
      return ChangeStatus::UNCHANGED;
    }

    // If the above check does not hold anymore we look at the calls.
    auto CheckForNoRecurse = [&](Instruction &I) {
      const auto &CB = cast<CallBase>(I);
      if (CB.hasFnAttr(Attribute::NoRecurse))
        return true;

      const auto &NoRecurseAA =
          A.getAAFor<AANoRecurse>(*this, IRPosition::callsite_function(CB));
      if (!NoRecurseAA.isAssumedNoRecurse())
        return false;

      // Recursion to the same function.
      if (CB.getCalledFunction() == getAnchorScope())
        return false;

      return true;
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoRecurse, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};
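
// Illustrative example (not part of the original source; @f and @g are
// hypothetical): @f forms an SCC of size one, and its only call targets @g,
// which is norecurse and distinct from @f, so the deduction above can mark @f
// norecurse as well:
//
//   define void @g() norecurse {
//     ret void
//   }
//   define void @f() {
//     call void @g()
//     ret void
//   }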

/// NoRecurse attribute deduction for call sites.
struct AANoRecurseCallSite final : AANoRecurseImpl {
  AANoRecurseCallSite(const IRPosition &IRP, Attributor &A)
      : AANoRecurseImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoRecurseImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoRecurse>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(norecurse); }
};

/// -------------------- Undefined-Behavior Attributes ------------------------

struct AAUndefinedBehaviorImpl : public AAUndefinedBehavior {
  AAUndefinedBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehavior(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  // Inspects memory accesses through a pointer, conditional branches, call
  // sites, and return instructions for known or assumed undefined behavior.
  ChangeStatus updateImpl(Attributor &A) override {
    const size_t UBPrevSize = KnownUBInsts.size();
    const size_t NoUBPrevSize = AssumedNoUBInsts.size();

    auto InspectMemAccessInstForUB = [&](Instruction &I) {
      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // If we reach here, we know we have an instruction
      // that accesses memory through a pointer operand,
      // for which getPointerOperand() should give it to us.
      const Value *PtrOp = getPointerOperand(&I, /* AllowVolatile */ true);
      assert(PtrOp &&
             "Expected pointer operand of memory accessing instruction");

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedPtrOp = stopOnUndefOrAssumed(A, PtrOp, &I);
      if (!SimplifiedPtrOp.hasValue())
        return true;
      const Value *PtrOpVal = SimplifiedPtrOp.getValue();

      // A memory access through a pointer is considered UB
      // only if the pointer has the constant null value.
      // TODO: Expand it to not only check constant values.
      if (!isa<ConstantPointerNull>(PtrOpVal)) {
        AssumedNoUBInsts.insert(&I);
        return true;
      }
      const Type *PtrTy = PtrOpVal->getType();

      // Because we only consider instructions inside functions,
      // assume that a parent function exists.
      const Function *F = I.getFunction();

      // A memory access using a constant null pointer is only considered UB
      // if the null pointer is _not_ defined for the target platform.
      if (llvm::NullPointerIsDefined(F, PtrTy->getPointerAddressSpace()))
        AssumedNoUBInsts.insert(&I);
      else
        KnownUBInsts.insert(&I);
      return true;
    };

    auto InspectBrInstForUB = [&](Instruction &I) {
      // A conditional branch instruction is considered UB if it has an `undef`
      // condition.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // We know we have a branch instruction.
      auto BrInst = cast<BranchInst>(&I);

      // Unconditional branches are never considered UB.
      if (BrInst->isUnconditional())
        return true;

      // Either we stopped and the appropriate action was taken,
      // or we got back a simplified value to continue.
      Optional<Value *> SimplifiedCond =
          stopOnUndefOrAssumed(A, BrInst->getCondition(), BrInst);
      if (!SimplifiedCond.hasValue())
        return true;
      AssumedNoUBInsts.insert(&I);
      return true;
    };

    auto InspectCallSiteForUB = [&](Instruction &I) {
      // Check whether a callsite always causes UB or not.

      // Skip instructions that are already saved.
      if (AssumedNoUBInsts.count(&I) || KnownUBInsts.count(&I))
        return true;

      // Check nonnull and noundef argument attribute violation for each
      // callsite.
      CallBase &CB = cast<CallBase>(I);
      Function *Callee = CB.getCalledFunction();
      if (!Callee)
        return true;
      for (unsigned idx = 0; idx < CB.getNumArgOperands(); idx++) {
        // If the current argument is known to be simplified to a null pointer
        // and the corresponding argument position is known to have the nonnull
        // attribute, the argument is poison. Furthermore, if the argument is
        // poison and the position is known to have the noundef attribute, this
        // callsite is considered UB.
        if (idx >= Callee->arg_size())
          break;
        Value *ArgVal = CB.getArgOperand(idx);
        if (!ArgVal)
          continue;
        // Here, we handle three cases.
        //   (1) Not having a value means it is dead. (we can replace the value
        //       with undef)
        //   (2) Simplified to undef. The argument violates the noundef
        //       attribute.
        //   (3) Simplified to a null pointer where it is known to be nonnull.
        //       The argument is a poison value and violates the noundef
        //       attribute.
        IRPosition CalleeArgumentIRP = IRPosition::callsite_argument(CB, idx);
        auto &NoUndefAA = A.getAAFor<AANoUndef>(*this, CalleeArgumentIRP,
                                                /* TrackDependence */ false);
        if (!NoUndefAA.isKnownNoUndef())
          continue;
        auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
            *this, IRPosition::value(*ArgVal), /* TrackDependence */ false);
        if (!ValueSimplifyAA.isKnown())
          continue;
        Optional<Value *> SimplifiedVal =
            ValueSimplifyAA.getAssumedSimplifiedValue(A);
        if (!SimplifiedVal.hasValue() ||
            isa<UndefValue>(*SimplifiedVal.getValue())) {
          KnownUBInsts.insert(&I);
          continue;
        }
        if (!ArgVal->getType()->isPointerTy() ||
            !isa<ConstantPointerNull>(*SimplifiedVal.getValue()))
          continue;
        auto &NonNullAA = A.getAAFor<AANonNull>(*this, CalleeArgumentIRP,
                                                /* TrackDependence */ false);
        if (NonNullAA.isKnownNonNull())
          KnownUBInsts.insert(&I);
      }
      return true;
    };

    auto InspectReturnInstForUB =
        [&](Value &V, const SmallSetVector<ReturnInst *, 4> RetInsts) {
          // Check whether a return instruction always causes UB or not.
          // Note: It is guaranteed that the returned position of the anchor
          //       scope has the noundef attribute when this is called.
          //       We also ensure the return position is not "assumed dead"
          //       because the returned value was then potentially simplified
          //       to `undef` in AAReturnedValues without removing the
          //       `noundef` attribute yet.

          // When the returned position has the noundef attribute, UB occurs
          // in the following cases.
          //   (1) Returned value is known to be undef.
          //   (2) The value is known to be a null pointer and the returned
          //       position has the nonnull attribute (because the returned
          //       value is poison).
          bool FoundUB = false;
          if (isa<UndefValue>(V)) {
            FoundUB = true;
          } else {
            if (isa<ConstantPointerNull>(V)) {
              auto &NonNullAA = A.getAAFor<AANonNull>(
                  *this, IRPosition::returned(*getAnchorScope()),
                  /* TrackDependence */ false);
              if (NonNullAA.isKnownNonNull())
                FoundUB = true;
            }
          }

          if (FoundUB)
            for (ReturnInst *RI : RetInsts)
              KnownUBInsts.insert(RI);
          return true;
        };

    A.checkForAllInstructions(InspectMemAccessInstForUB, *this,
                              {Instruction::Load, Instruction::Store,
                               Instruction::AtomicCmpXchg,
                               Instruction::AtomicRMW},
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllInstructions(InspectBrInstForUB, *this, {Instruction::Br},
                              /* CheckBBLivenessOnly */ true);
    A.checkForAllCallLikeInstructions(InspectCallSiteForUB, *this);

    // If the returned position of the anchor scope has the noundef attribute,
    // check all return instructions.
    if (!getAnchorScope()->getReturnType()->isVoidTy()) {
      const IRPosition &ReturnIRP = IRPosition::returned(*getAnchorScope());
      if (!A.isAssumedDead(ReturnIRP, this, nullptr)) {
        auto &RetPosNoUndefAA =
            A.getAAFor<AANoUndef>(*this, ReturnIRP,
                                  /* TrackDependence */ false);
        if (RetPosNoUndefAA.isKnownNoUndef())
          A.checkForAllReturnedValuesAndReturnInsts(InspectReturnInstForUB,
                                                    *this);
      }
    }

    if (NoUBPrevSize != AssumedNoUBInsts.size() ||
        UBPrevSize != KnownUBInsts.size())
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  bool isKnownToCauseUB(Instruction *I) const override {
    return KnownUBInsts.count(I);
  }

  bool isAssumedToCauseUB(Instruction *I) const override {
    // In simple words, if an instruction is not in the set of instructions
    // assumed to _not_ cause UB, then it is assumed to cause UB (that
    // includes those in the KnownUBInsts set). The rest is boilerplate to
    // ensure that it is one of the instructions we test for UB.

    switch (I->getOpcode()) {
    case Instruction::Load:
    case Instruction::Store:
    case Instruction::AtomicCmpXchg:
    case Instruction::AtomicRMW:
      return !AssumedNoUBInsts.count(I);
    case Instruction::Br: {
      auto BrInst = cast<BranchInst>(I);
      if (BrInst->isUnconditional())
        return false;
      return !AssumedNoUBInsts.count(I);
    }
    default:
      return false;
    }
    return false;
  }

  ChangeStatus manifest(Attributor &A) override {
    if (KnownUBInsts.empty())
      return ChangeStatus::UNCHANGED;
    for (Instruction *I : KnownUBInsts)
      A.changeToUnreachableAfterManifest(I);
    return ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "undefined-behavior" : "no-ub";
  }

  /// Note: The correctness of this analysis depends on the fact that the
  /// following 2 sets will stop changing after some point.
  /// "Change" here means that their size changes.
  /// The size of each set is monotonically increasing
  /// (we only add items to them) and it is upper bounded by the number of
  /// instructions in the processed function (we can never save more
  /// elements in either set than this number). Hence, at some point,
  /// they will stop increasing.
  /// Consequently, at some point, both sets will have stopped
  /// changing, effectively making the analysis reach a fixpoint.

  /// Note: These 2 sets are disjoint and an instruction can be considered
  /// one of 3 things:
  /// 1) Known to cause UB (AAUndefinedBehavior could prove it) and put it in
  ///    the KnownUBInsts set.
  /// 2) Assumed to cause UB (in every updateImpl, AAUndefinedBehavior
  ///    has a reason to assume it).
  /// 3) Assumed to not cause UB. Every other instruction - AAUndefinedBehavior
  ///    could not find a reason to assume or prove that it can cause UB,
  ///    hence it assumes it doesn't. We have a set for these instructions
  ///    so that we don't reprocess them in every update.
  ///    Note however that instructions in this set may cause UB.

protected:
  /// A set of all live instructions _known_ to cause UB.
  SmallPtrSet<Instruction *, 8> KnownUBInsts;

private:
  /// A set of all the (live) instructions that are assumed to _not_ cause UB.
  SmallPtrSet<Instruction *, 8> AssumedNoUBInsts;

  // Should be called on updates in which, if we're processing an instruction
  // \p I that depends on a value \p V, one of the following has to happen:
  // - If the value is assumed, then stop.
  // - If the value is known but undef, then consider it UB.
  // - Otherwise, do specific processing with the simplified value.
  // We return None in the first 2 cases to signify that an appropriate
  // action was taken and the caller should stop.
  // Otherwise, we return the simplified value that the caller should
  // use for specific processing.
  Optional<Value *> stopOnUndefOrAssumed(Attributor &A, const Value *V,
                                         Instruction *I) {
    const auto &ValueSimplifyAA =
        A.getAAFor<AAValueSimplify>(*this, IRPosition::value(*V));
    Optional<Value *> SimplifiedV =
        ValueSimplifyAA.getAssumedSimplifiedValue(A);
    if (!ValueSimplifyAA.isKnown()) {
      // Don't depend on assumed values.
      return llvm::None;
    }
    if (!SimplifiedV.hasValue()) {
      // If it is known (which we tested above) but it doesn't have a value,
      // then we can assume `undef` and hence the instruction is UB.
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    Value *Val = SimplifiedV.getValue();
    if (isa<UndefValue>(Val)) {
      KnownUBInsts.insert(I);
      return llvm::None;
    }
    return Val;
  }
};
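
// Illustrative example (not part of the original source): a store through a
// constant null pointer, in a function where null is not defined, ends up in
// KnownUBInsts; manifesting the attribute then replaces it with an
// unreachable terminator:
//
//   store i32 0, i32* null   ; known UB, becomes "unreachable" on manifest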

struct AAUndefinedBehaviorFunction final : AAUndefinedBehaviorImpl {
  AAUndefinedBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAUndefinedBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(UndefinedBehaviorInstruction, Instruction,
               "Number of instructions known to have UB");
    BUILD_STAT_NAME(UndefinedBehaviorInstruction, Instruction) +=
        KnownUBInsts.size();
  }
};

/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle which we don't
// know whether it is bounded or not. Loops with a known maximum trip count are
// considered bounded; any other cycle is not.
static bool mayContainUnboundedCycle(Function &F, Attributor &A) {
  ScalarEvolution *SE =
      A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(F);
  LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(F);
  // If either SCEV or LoopInfo is not available for the function then we
  // assume any cycle to be unbounded.
  // We use scc_iterator, which uses Tarjan's algorithm to find all the maximal
  // SCCs. To detect if there's a cycle, we only need to find the maximal ones.
  if (!SE || !LI) {
    for (scc_iterator<Function *> SCCI = scc_begin(&F); !SCCI.isAtEnd(); ++SCCI)
      if (SCCI.hasCycle())
        return true;
    return false;
  }

  // If there's irreducible control, the function may contain non-loop cycles.
  if (mayContainIrreducibleControl(F, LI))
    return true;

  // Any loop that does not have a max trip count is considered an unbounded
  // cycle.
  for (auto *L : LI->getLoopsInPreorder()) {
    if (!SE->getSmallConstantMaxTripCount(L))
      return true;
  }
  return false;
}
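
// Illustrative example (not part of the original source): SCEV can compute a
// small constant maximum trip count for the loop below (128), so it is
// considered bounded; an unconditional "br label %loop" cycle would not be:
//
//   loop:
//     %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
//     %i.next = add nuw nsw i64 %i, 1
//     %cmp = icmp ult i64 %i.next, 128
//     br i1 %cmp, label %loop, label %exit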

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP, Attributor &A)
      : AAWillReturn(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);

    Function *F = getAnchorScope();
    if (!F || F->isDeclaration() || mayContainUnboundedCycle(*F, A))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(cast<CallBase>(I));
      const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};
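
// Illustrative example (not part of the original source; @f and @g are
// hypothetical): if @f contains no unbounded cycles and its only call is to
// @g, which is assumed willreturn and norecurse, @f itself is deduced
// willreturn by the update above:
//
//   define void @f() {
//     call void @g()   ; @g assumed willreturn + norecurse
//     ret void
//   }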

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for call sites.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP, Attributor &A)
      : AAWillReturnImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturn::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || !A.isFunctionIPOAmendable(*F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};

/// -------------------AAReachability Attribute--------------------------

struct AAReachabilityImpl : AAReachability {
  AAReachabilityImpl(const IRPosition &IRP, Attributor &A)
      : AAReachability(IRP, A) {}

  const std::string getAsStr() const override {
    // TODO: Return the number of reachable queries.
    return "reachable";
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override { indicatePessimisticFixpoint(); }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }
};

struct AAReachabilityFunction final : public AAReachabilityImpl {
  AAReachabilityFunction(const IRPosition &IRP, Attributor &A)
      : AAReachabilityImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(reachable); }
};

/// ------------------------ NoAlias Argument Attribute ------------------------

struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP, Attributor &A) : AANoAlias(IRP, A) {
    assert(getAssociatedType()->isPointerTy() &&
           "Noalias is a pointer attribute");
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};

/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Value *Val = &getAssociatedValue();
    do {
      CastInst *CI = dyn_cast<CastInst>(Val);
      if (!CI)
        break;
      Value *Base = CI->getOperand(0);
      if (!Base->hasOneUse())
        break;
      Val = Base;
    } while (true);

    if (!Val->getType()->isPointerTy()) {
      indicatePessimisticFixpoint();
      return;
    }

    if (isa<AllocaInst>(Val))
      indicateOptimisticFixpoint();
    else if (isa<ConstantPointerNull>(Val) &&
             !NullPointerIsDefined(getAnchorScope(),
                                   Val->getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
    else if (Val != &getAssociatedValue()) {
      const auto &ValNoAliasAA =
          A.getAAFor<AANoAlias>(*this, IRPosition::value(*Val));
      if (ValNoAliasAA.isKnownNoAlias())
        indicateOptimisticFixpoint();
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  using Base = AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>;
  AANoAliasArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Base::initialize(A);
    // See callsite argument attribute and callee argument attribute.
    if (hasAttr({Attribute::ByVal}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::update(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // We have to make sure no-alias on the argument does not break
    // synchronization when this is a callback argument, see also [1] below.
    // If synchronization cannot be affected, we delegate to the base updateImpl
    // function, otherwise we give up for now.

    // If the function is no-sync, no-alias cannot break synchronization.
    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        *this, IRPosition::function_scope(getIRPosition()));
    if (NoSyncAA.isAssumedNoSync())
      return Base::updateImpl(A);

    // If the argument is read-only, no-alias cannot break synchronization.
    const auto &MemBehaviorAA =
        A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
    if (MemBehaviorAA.isAssumedReadOnly())
      return Base::updateImpl(A);

    // If the argument is never passed through callbacks, no-alias cannot break
    // synchronization.
    bool AllCallSitesKnown;
    if (A.checkForAllCallSites(
            [](AbstractCallSite ACS) { return !ACS.isCallbackCall(); }, *this,
            true, AllCallSitesKnown))
      return Base::updateImpl(A);

    // TODO: add no-alias but make sure it doesn't break synchronization by
    // introducing fake uses. See:
    // [1] Compiler Optimizations for OpenMP, J. Doerfert and H. Finkel,
    //     International Workshop on OpenMP 2018,
    //     http://compilers.cs.uni-saarland.de/people/doerfert/par_opt18.pdf

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    const auto &CB = cast<CallBase>(getAnchorValue());
    if (CB.paramHasAttr(getCallSiteArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
    Value &Val = getAssociatedValue();
    if (isa<ConstantPointerNull>(Val) &&
        !NullPointerIsDefined(getAnchorScope(),
                              Val.getType()->getPointerAddressSpace()))
      indicateOptimisticFixpoint();
  }

  /// Determine if the underlying value may alias with the call site argument
  /// \p OtherArgNo of \p CB (= the underlying call site).
  bool mayAliasWithArgument(Attributor &A, AAResults *&AAR,
                            const AAMemoryBehavior &MemBehaviorAA,
                            const CallBase &CB, unsigned OtherArgNo) {
    // We do not need to worry about aliasing with the underlying IRP.
    if (this->getCalleeArgNo() == (int)OtherArgNo)
      return false;

    // If it is not a pointer or pointer vector we do not alias.
    const Value *ArgOp = CB.getArgOperand(OtherArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      return false;

    auto &CBArgMemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, IRPosition::callsite_argument(CB, OtherArgNo),
        /* TrackDependence */ false);

    // If the argument is readnone, there is no read-write aliasing.
    if (CBArgMemBehaviorAA.isAssumedReadNone()) {
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // If the argument is readonly and the underlying value is readonly, there
    // is no read-write aliasing.
    bool IsReadOnly = MemBehaviorAA.isAssumedReadOnly();
    if (CBArgMemBehaviorAA.isAssumedReadOnly() && IsReadOnly) {
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      A.recordDependence(CBArgMemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return false;
    }

    // We have to utilize actual alias analysis queries so we need the object.
    if (!AAR)
      AAR = A.getInfoCache().getAAResultsForFunction(*getAnchorScope());

    // Try to rule it out at the call site.
    bool IsAliasing = !AAR || !AAR->isNoAlias(&getAssociatedValue(), ArgOp);
    LLVM_DEBUG(dbgs() << "[NoAliasCSArg] Check alias between "
                         "callsite arguments: "
                      << getAssociatedValue() << " " << *ArgOp << " => "
                      << (IsAliasing ? "" : "no-") << "alias \n");

    return IsAliasing;
  }
2559 
2560   bool
isKnownNoAliasDueToNoAliasPreservation__anon0ce335530111::AANoAliasCallSiteArgument2561   isKnownNoAliasDueToNoAliasPreservation(Attributor &A, AAResults *&AAR,
2562                                          const AAMemoryBehavior &MemBehaviorAA,
2563                                          const AANoAlias &NoAliasAA) {
2564     // We can deduce "noalias" if the following conditions hold.
2565     // (i)   Associated value is assumed to be noalias in the definition.
2566     // (ii)  Associated value is assumed to be no-capture in all the uses
2567     //       possibly executed before this callsite.
2568     // (iii) There is no other pointer argument which could alias with the
2569     //       value.
2570 
2571     bool AssociatedValueIsNoAliasAtDef = NoAliasAA.isAssumedNoAlias();
2572     if (!AssociatedValueIsNoAliasAtDef) {
2573       LLVM_DEBUG(dbgs() << "[AANoAlias] " << getAssociatedValue()
2574                         << " is not no-alias at the definition\n");
2575       return false;
2576     }
2577 
2578     A.recordDependence(NoAliasAA, *this, DepClassTy::OPTIONAL);
2579 
2580     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2581     const Function *ScopeFn = VIRP.getAnchorScope();
2582     auto &NoCaptureAA =
2583         A.getAAFor<AANoCapture>(*this, VIRP, /* TrackDependence */ false);
2584     // Check whether the value is captured in the scope using AANoCapture.
2585     //      Look at CFG and check only uses possibly executed before this
2586     //      callsite.
2587     auto UsePred = [&](const Use &U, bool &Follow) -> bool {
2588       Instruction *UserI = cast<Instruction>(U.getUser());
2589 
2590       // If UserI is the curr instruction and there is a single potential use of
2591       // the value in UserI we allow the use.
2592       // TODO: We should inspect the operands and allow those that cannot alias
2593       //       with the value.
2594       if (UserI == getCtxI() && UserI->getNumOperands() == 1)
2595         return true;
2596 
2597       if (ScopeFn) {
2598         const auto &ReachabilityAA =
2599             A.getAAFor<AAReachability>(*this, IRPosition::function(*ScopeFn));
2600 
2601         if (!ReachabilityAA.isAssumedReachable(A, *UserI, *getCtxI()))
2602           return true;
2603 
2604         if (auto *CB = dyn_cast<CallBase>(UserI)) {
2605           if (CB->isArgOperand(&U)) {
2606 
2607             unsigned ArgNo = CB->getArgOperandNo(&U);
2608 
2609             const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
2610                 *this, IRPosition::callsite_argument(*CB, ArgNo));
2611 
2612             if (NoCaptureAA.isAssumedNoCapture())
2613               return true;
2614           }
2615         }
2616       }
2617 
2618       // For cases which can potentially have more users
2619       if (isa<GetElementPtrInst>(U) || isa<BitCastInst>(U) || isa<PHINode>(U) ||
2620           isa<SelectInst>(U)) {
2621         Follow = true;
2622         return true;
2623       }
2624 
2625       LLVM_DEBUG(dbgs() << "[AANoAliasCSArg] Unknown user: " << *U << "\n");
2626       return false;
2627     };
2628 
2629     if (!NoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
2630       if (!A.checkForAllUses(UsePred, *this, getAssociatedValue())) {
2631         LLVM_DEBUG(
2632             dbgs() << "[AANoAliasCSArg] " << getAssociatedValue()
2633                    << " cannot be noalias as it is potentially captured\n");
2634         return false;
2635       }
2636     }
2637     A.recordDependence(NoCaptureAA, *this, DepClassTy::OPTIONAL);
2638 
2639     // Check there is no other pointer argument which could alias with the
2640     // value passed at this call site.
2641     // TODO: AbstractCallSite
2642     const auto &CB = cast<CallBase>(getAnchorValue());
2643     for (unsigned OtherArgNo = 0; OtherArgNo < CB.getNumArgOperands();
2644          OtherArgNo++)
2645       if (mayAliasWithArgument(A, AAR, MemBehaviorAA, CB, OtherArgNo))
2646         return false;
2647 
2648     return true;
2649   }
2650 
2651   /// See AbstractAttribute::updateImpl(...).
updateImpl__anon0ce335530111::AANoAliasCallSiteArgument2652   ChangeStatus updateImpl(Attributor &A) override {
2653     // If the argument is readnone we are done as there are no accesses via the
2654     // argument.
2655     auto &MemBehaviorAA =
2656         A.getAAFor<AAMemoryBehavior>(*this, getIRPosition(),
2657                                      /* TrackDependence */ false);
2658     if (MemBehaviorAA.isAssumedReadNone()) {
2659       A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
2660       return ChangeStatus::UNCHANGED;
2661     }
2662 
2663     const IRPosition &VIRP = IRPosition::value(getAssociatedValue());
2664     const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, VIRP,
2665                                                   /* TrackDependence */ false);
2666 
2667     AAResults *AAR = nullptr;
2668     if (isKnownNoAliasDueToNoAliasPreservation(A, AAR, MemBehaviorAA,
2669                                                NoAliasAA)) {
2670       LLVM_DEBUG(
2671           dbgs() << "[AANoAlias] No-Alias deduced via no-alias preservation\n");
2672       return ChangeStatus::UNCHANGED;
2673     }
2674 
2675     return indicatePessimisticFixpoint();
2676   }
2677 
2678   /// See AbstractAttribute::trackStatistics()
trackStatistics__anon0ce335530111::AANoAliasCallSiteArgument2679   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
2680 };
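
// A small IR sketch of the (i)-(iii) conditions checked above (illustrative;
// @use is a made-up callee):
//
//   %p = call noalias i8* @malloc(i64 8)   ; (i)  noalias at the definition
//   ; no capturing use of %p reaches here  ; (ii) no-capture before the call
//   call void @use(i8* %p, i8* %q)         ; (iii) %q must be ruled out as
//                                          ;       an aliasing argument
//
// Only if all three hold can the call site argument %p be marked noalias.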

/// NoAlias attribute for function return value.
struct AANoAliasReturned final : AANoAliasImpl {
  AANoAliasReturned(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    auto CheckReturnValue = [&](Value &RV) -> bool {
      if (Constant *C = dyn_cast<Constant>(&RV))
        if (C->isNullValue() || isa<UndefValue>(C))
          return true;

      // For now, we can only deduce noalias if we have call sites.
      // FIXME: add more support.
      if (!isa<CallBase>(&RV))
        return false;

      const IRPosition &RVPos = IRPosition::value(RV);
      const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
      if (!NoAliasAA.isAssumedNoAlias())
        return false;

      const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
      return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
    };

    if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};

/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
  AANoAliasCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AANoAliasImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};
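
// Illustrative IR for the return-value case above (names made up): in
//
//   define i8* @wrap() {
//     %m = call noalias i8* @malloc(i64 4)
//     ret i8* %m
//   }
//
// every returned value is a call site assumed noalias and no-capture
// (maybe-returned), so @wrap's return value can itself be marked noalias.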

/// -------------------AAIsDead Function Attribute-----------------------

struct AAIsDeadValueImpl : public AAIsDead {
  AAIsDeadValueImpl(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override { return getAssumed(); }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return getKnown(); }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override { return false; }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    return I == getCtxI() && isAssumedDead();
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return isAssumedDead(I) && getKnown();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead() ? "assumed-dead" : "assumed-live";
  }

  /// Check if all uses are assumed dead.
  bool areAllUsesAssumedDead(Attributor &A, Value &V) {
    auto UsePred = [&](const Use &U, bool &Follow) { return false; };
    // Explicitly set the dependence class to required because we want a long
    // chain of N dependent instructions to be considered live as soon as one
    // is, without going through N update cycles. This is not required for
    // correctness.
    return A.checkForAllUses(UsePred, *this, V, DepClassTy::REQUIRED);
  }

  /// Determine if \p I is assumed to be side-effect free.
  bool isAssumedSideEffectFree(Attributor &A, Instruction *I) {
    if (!I || wouldInstructionBeTriviallyDead(I))
      return true;

    auto *CB = dyn_cast<CallBase>(I);
    if (!CB || isa<IntrinsicInst>(CB))
      return false;

    const IRPosition &CallIRP = IRPosition::callsite_function(*CB);
    const auto &NoUnwindAA = A.getAndUpdateAAFor<AANoUnwind>(
        *this, CallIRP, /* TrackDependence */ false);
    if (!NoUnwindAA.isAssumedNoUnwind())
      return false;
    if (!NoUnwindAA.isKnownNoUnwind())
      A.recordDependence(NoUnwindAA, *this, DepClassTy::OPTIONAL);

    const auto &MemBehaviorAA = A.getAndUpdateAAFor<AAMemoryBehavior>(
        *this, CallIRP, /* TrackDependence */ false);
    if (MemBehaviorAA.isAssumedReadOnly()) {
      if (!MemBehaviorAA.isKnownReadOnly())
        A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return true;
    }
    return false;
  }
};
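
// For intuition (illustrative IR; @pure is a made-up function): a call such
// as
//
//   %v = call i32 @pure(i32 %x)   ; assumed nounwind + readonly, %v unused
//
// passes the isAssumedSideEffectFree check above, so if all uses of %v are
// also assumed dead, the whole call can be treated as dead and later deleted.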

struct AAIsDeadFloating : public AAIsDeadValueImpl {
  AAIsDeadFloating(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (!isAssumedSideEffectFree(A, I))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    if (!isAssumedSideEffectFree(A, I))
      return indicatePessimisticFixpoint();

    if (!areAllUsesAssumedDead(A, getAssociatedValue()))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Value &V = getAssociatedValue();
    if (auto *I = dyn_cast<Instruction>(&V)) {
      // If we get here we basically know the users are all dead. We check
      // isAssumedSideEffectFree again because it might not hold anymore; then
      // only the users are dead but the instruction (= call) is still needed.
      if (isAssumedSideEffectFree(A, I) && !isa<InvokeInst>(I)) {
        A.deleteAfterManifest(*I);
        return ChangeStatus::CHANGED;
      }
    }
    if (V.use_empty())
      return ChangeStatus::UNCHANGED;

    bool UsedAssumedInformation = false;
    Optional<Constant *> C =
        A.getAssumedConstant(V, *this, UsedAssumedInformation);
    if (C.hasValue() && C.getValue())
      return ChangeStatus::UNCHANGED;

    // Replace the value with undef as it is dead but keep droppable uses
    // around as they provide information we don't want to give up on just yet.
    UndefValue &UV = *UndefValue::get(V.getType());
    bool AnyChange =
        A.changeValueAfterManifest(V, UV, /* ChangeDroppable */ false);
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(IsDead)
  }
};
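
// Note on droppable uses (illustrative): a use inside an assumption, e.g.
//
//   call void @llvm.assume(i1 true) [ "nonnull"(i8* %p) ]
//
// is droppable. The manifest above deliberately leaves such uses intact
// (ChangeDroppable == false) so the encoded information survives until the
// value is actually removed.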

struct AAIsDeadArgument : public AAIsDeadFloating {
  AAIsDeadArgument(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (!A.isFunctionIPOAmendable(*getAnchorScope()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = AAIsDeadFloating::manifest(A);
    Argument &Arg = *getAssociatedArgument();
    if (A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
      if (A.registerFunctionSignatureRewrite(
              Arg, /* ReplacementTypes */ {},
              Attributor::ArgumentReplacementInfo::CalleeRepairCBTy{},
              Attributor::ArgumentReplacementInfo::ACSRepairCBTy{})) {
        Arg.dropDroppableUses();
        return ChangeStatus::CHANGED;
      }
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(IsDead) }
};

struct AAIsDeadCallSiteArgument : public AAIsDeadValueImpl {
  AAIsDeadCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AAIsDead>(*this, ArgPos);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    CallBase &CB = cast<CallBase>(getAnchorValue());
    Use &U = CB.getArgOperandUse(getCallSiteArgNo());
    assert(!isa<UndefValue>(U.get()) &&
           "Expected undef values to be filtered out!");
    UndefValue &UV = *UndefValue::get(U->getType());
    if (A.changeUseAfterManifest(U, UV))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(IsDead) }
};

struct AAIsDeadCallSiteReturned : public AAIsDeadFloating {
  AAIsDeadCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFloating(IRP, A), IsAssumedSideEffectFree(true) {}

  /// See AAIsDead::isAssumedDead().
  bool isAssumedDead() const override {
    return AAIsDeadFloating::isAssumedDead() && IsAssumedSideEffectFree;
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (isa<UndefValue>(getAssociatedValue())) {
      indicatePessimisticFixpoint();
      return;
    }

    // We track this separately as a secondary state.
    IsAssumedSideEffectFree = isAssumedSideEffectFree(A, getCtxI());
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    if (IsAssumedSideEffectFree && !isAssumedSideEffectFree(A, getCtxI())) {
      IsAssumedSideEffectFree = false;
      Changed = ChangeStatus::CHANGED;
    }

    if (!areAllUsesAssumedDead(A, getAssociatedValue()))
      return indicatePessimisticFixpoint();
    return Changed;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (IsAssumedSideEffectFree)
      STATS_DECLTRACK_CSRET_ATTR(IsDead)
    else
      STATS_DECLTRACK_CSRET_ATTR(UnusedResult)
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return isAssumedDead()
               ? "assumed-dead"
               : (getAssumed() ? "assumed-dead-users" : "assumed-live");
  }

private:
  bool IsAssumedSideEffectFree;
};

struct AAIsDeadReturned : public AAIsDeadValueImpl {
  AAIsDeadReturned(const IRPosition &IRP, Attributor &A)
      : AAIsDeadValueImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    A.checkForAllInstructions([](Instruction &) { return true; }, *this,
                              {Instruction::Ret});

    auto PredForCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isCallbackCall() || !ACS.getInstruction())
        return false;
      return areAllUsesAssumedDead(A, *ACS.getInstruction());
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(PredForCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // TODO: Rewrite the signature to return void?
    bool AnyChange = false;
    UndefValue &UV = *UndefValue::get(getAssociatedFunction()->getReturnType());
    auto RetInstPred = [&](Instruction &I) {
      ReturnInst &RI = cast<ReturnInst>(I);
      if (!isa<UndefValue>(RI.getReturnValue()))
        AnyChange |= A.changeUseAfterManifest(RI.getOperandUse(0), UV);
      return true;
    };
    A.checkForAllInstructions(RetInstPred, *this, {Instruction::Ret});
    return AnyChange ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(IsDead) }
};

struct AAIsDeadFunction : public AAIsDead {
  AAIsDeadFunction(const IRPosition &IRP, Attributor &A) : AAIsDead(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    const Function *F = getAnchorScope();
    if (F && !F->isDeclaration()) {
      // We only want to compute liveness once. If the function is not part of
      // the SCC, skip it.
      if (A.isRunOn(*const_cast<Function *>(F))) {
        ToBeExploredFrom.insert(&F->getEntryBlock().front());
        assumeLive(A, F->getEntryBlock());
      } else {
        indicatePessimisticFixpoint();
      }
    }
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAnchorScope()->size()) + "][#TBEP " +
           std::to_string(ToBeExploredFrom.size()) + "][#KDE " +
           std::to_string(KnownDeadEnds.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAnchorScope();

    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    KnownDeadEnds.set_union(ToBeExploredFrom);
    for (const Instruction *DeadEndI : KnownDeadEnds) {
      auto *CB = dyn_cast<CallBase>(DeadEndI);
      if (!CB)
        continue;
      const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
          *this, IRPosition::callsite_function(*CB), /* TrackDependence */ true,
          DepClassTy::OPTIONAL);
      bool MayReturn = !NoReturnAA.isAssumedNoReturn();
      if (MayReturn && (!Invoke2CallAllowed || !isa<InvokeInst>(CB)))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(DeadEndI))
        A.registerInvokeWithDeadSuccessor(const_cast<InvokeInst &>(*II));
      else
        A.changeToUnreachableAfterManifest(
            const_cast<Instruction *>(DeadEndI->getNextNode()));
      HasChanged = ChangeStatus::CHANGED;
    }

    STATS_DECL(AAIsDead, BasicBlock, "Number of dead basic blocks deleted.");
    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB)) {
        A.deleteAfterManifest(BB);
        ++BUILD_STAT_NAME(AAIsDead, BasicBlock);
      }

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const override {
    return !AssumedLiveEdges.count(std::make_pair(From, To));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}

  /// Returns true if the function is assumed dead.
  bool isAssumedDead() const override { return false; }

  /// See AAIsDead::isKnownDead().
  bool isKnownDead() const override { return false; }

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAnchorScope() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAnchorScope() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a liveness barrier it is live.
    const Instruction *PrevI = I->getPrevNode();
    while (PrevI) {
      if (KnownDeadEnds.count(PrevI) || ToBeExploredFrom.count(PrevI))
        return true;
      PrevI = PrevI->getPrevNode();
    }
    return false;
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
  bool assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return false;

    // We assume that all of BB is (probably) live now and if there are calls
    // to internal functions we will assume that those are now live as well.
    // This is a performance optimization for blocks with calls to a lot of
    // internal functions. It can however cause dead functions to be treated
    // as live.
    for (const Instruction &I : BB)
      if (const auto *CB = dyn_cast<CallBase>(&I))
        if (const Function *F = CB->getCalledFunction())
          if (F->hasLocalLinkage())
            A.markLiveInternalFunction(*F);
    return true;
  }

  /// Collection of instructions that need to be explored again, e.g., we
  /// did assume they do not transfer control to (one of their) successors.
  SmallSetVector<const Instruction *, 8> ToBeExploredFrom;

  /// Collection of instructions that are known to not transfer control.
  SmallSetVector<const Instruction *, 8> KnownDeadEnds;

  /// Collection of all assumed live edges.
  DenseSet<std::pair<const BasicBlock *, const BasicBlock *>> AssumedLiveEdges;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;
};
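
// A tiny liveness example for the exploration performed below (illustrative
// IR; @exit stands for any call deduced noreturn):
//
//   entry:
//     call void @exit(i32 0)   ; assumed noreturn -> exploration stops here
//     br label %next           ; never reached during exploration
//   next:                      ; stays out of AssumedLiveBlocks
//     ...
//
// During manifest, the instruction after the noreturn call is replaced by
// "unreachable" and %next is deleted as a dead block.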

static bool
identifyAliveSuccessors(Attributor &A, const CallBase &CB,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  const IRPosition &IPos = IRPosition::callsite_function(CB);

  const auto &NoReturnAA = A.getAndUpdateAAFor<AANoReturn>(
      AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (NoReturnAA.isAssumedNoReturn())
    return !NoReturnAA.isKnownNoReturn();
  if (CB.isTerminator())
    AliveSuccessors.push_back(&CB.getSuccessor(0)->front());
  else
    AliveSuccessors.push_back(CB.getNextNode());
  return false;
}

static bool
identifyAliveSuccessors(Attributor &A, const InvokeInst &II,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation =
      identifyAliveSuccessors(A, cast<CallBase>(II), AA, AliveSuccessors);

  // First, determine if we can change an invoke to a call assuming the
  // callee is nounwind. This is not possible if the personality of the
  // function allows catching asynchronous exceptions.
  if (AAIsDeadFunction::mayCatchAsynchronousExceptions(*II.getFunction())) {
    AliveSuccessors.push_back(&II.getUnwindDest()->front());
  } else {
    const IRPosition &IPos = IRPosition::callsite_function(II);
    const auto &AANoUnw = A.getAndUpdateAAFor<AANoUnwind>(
        AA, IPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
    if (AANoUnw.isAssumedNoUnwind()) {
      UsedAssumedInformation |= !AANoUnw.isKnownNoUnwind();
    } else {
      AliveSuccessors.push_back(&II.getUnwindDest()->front());
    }
  }
  return UsedAssumedInformation;
}

static bool
identifyAliveSuccessors(Attributor &A, const BranchInst &BI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  if (BI.getNumSuccessors() == 1) {
    AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
  } else {
    Optional<ConstantInt *> CI = getAssumedConstantInt(
        A, *BI.getCondition(), AA, UsedAssumedInformation);
    if (!CI.hasValue()) {
      // No value yet, assume both edges are dead.
    } else if (CI.getValue()) {
      const BasicBlock *SuccBB =
          BI.getSuccessor(1 - CI.getValue()->getZExtValue());
      AliveSuccessors.push_back(&SuccBB->front());
    } else {
      AliveSuccessors.push_back(&BI.getSuccessor(0)->front());
      AliveSuccessors.push_back(&BI.getSuccessor(1)->front());
      UsedAssumedInformation = false;
    }
  }
  return UsedAssumedInformation;
}
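
// Worked example for the conditional-branch case above (illustrative IR):
// given
//
//   br i1 %c, label %t, label %f
//
// if %c is assumed to be the constant i1 1, only %t's first instruction is
// added (getSuccessor(1 - 1)); if %c is known to be non-constant, both
// successors are alive and no assumed information was used.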

static bool
identifyAliveSuccessors(Attributor &A, const SwitchInst &SI,
                        AbstractAttribute &AA,
                        SmallVectorImpl<const Instruction *> &AliveSuccessors) {
  bool UsedAssumedInformation = false;
  Optional<ConstantInt *> CI =
      getAssumedConstantInt(A, *SI.getCondition(), AA, UsedAssumedInformation);
  if (!CI.hasValue()) {
    // No value yet, assume all edges are dead.
  } else if (CI.getValue()) {
    for (auto &CaseIt : SI.cases()) {
      if (CaseIt.getCaseValue() == CI.getValue()) {
        AliveSuccessors.push_back(&CaseIt.getCaseSuccessor()->front());
        return UsedAssumedInformation;
      }
    }
    AliveSuccessors.push_back(&SI.getDefaultDest()->front());
    return UsedAssumedInformation;
  } else {
    for (const BasicBlock *SuccBB : successors(SI.getParent()))
      AliveSuccessors.push_back(&SuccBB->front());
  }
  return UsedAssumedInformation;
}
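
// Analogous switch example (illustrative IR): for
//
//   switch i32 %x, label %default [ i32 0, label %a
//                                   i32 1, label %b ]
//
// an assumed constant %x == 1 leaves only %b alive, an assumed constant with
// no matching case leaves only %default alive, and a non-constant %x makes
// every successor alive.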

ChangeStatus AAIsDeadFunction::updateImpl(Attributor &A) {
  ChangeStatus Change = ChangeStatus::UNCHANGED;

  LLVM_DEBUG(dbgs() << "[AAIsDead] Live [" << AssumedLiveBlocks.size() << "/"
                    << getAnchorScope()->size() << "] BBs and "
                    << ToBeExploredFrom.size() << " exploration points and "
                    << KnownDeadEnds.size() << " known dead ends\n");

  // Copy and clear the list of instructions we need to explore from. It is
  // refilled with instructions the next update has to look at.
  SmallVector<const Instruction *, 8> Worklist(ToBeExploredFrom.begin(),
                                               ToBeExploredFrom.end());
  decltype(ToBeExploredFrom) NewToBeExploredFrom;

  SmallVector<const Instruction *, 8> AliveSuccessors;
  while (!Worklist.empty()) {
    const Instruction *I = Worklist.pop_back_val();
    LLVM_DEBUG(dbgs() << "[AAIsDead] Exploration inst: " << *I << "\n");

    // Fast forward for uninteresting instructions. We could look for UB here
    // though.
    while (!I->isTerminator() && !isa<CallBase>(I)) {
      Change = ChangeStatus::CHANGED;
      I = I->getNextNode();
    }

    AliveSuccessors.clear();

    bool UsedAssumedInformation = false;
    switch (I->getOpcode()) {
    // TODO: look for (assumed) UB to backwards propagate "deadness".
    default:
      assert(I->isTerminator() &&
             "Expected non-terminators to be handled already!");
      for (const BasicBlock *SuccBB : successors(I->getParent()))
        AliveSuccessors.push_back(&SuccBB->front());
      break;
    case Instruction::Call:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<CallInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Invoke:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<InvokeInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Br:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<BranchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    case Instruction::Switch:
      UsedAssumedInformation = identifyAliveSuccessors(A, cast<SwitchInst>(*I),
                                                       *this, AliveSuccessors);
      break;
    }

    if (UsedAssumedInformation) {
      NewToBeExploredFrom.insert(I);
    } else {
      Change = ChangeStatus::CHANGED;
      if (AliveSuccessors.empty() ||
          (I->isTerminator() && AliveSuccessors.size() < I->getNumSuccessors()))
        KnownDeadEnds.insert(I);
    }

    LLVM_DEBUG(dbgs() << "[AAIsDead] #AliveSuccessors: "
                      << AliveSuccessors.size() << " UsedAssumedInformation: "
                      << UsedAssumedInformation << "\n");

    for (const Instruction *AliveSuccessor : AliveSuccessors) {
      if (!I->isTerminator()) {
        assert(AliveSuccessors.size() == 1 &&
               "Non-terminator expected to have a single successor!");
        Worklist.push_back(AliveSuccessor);
      } else {
        // Record the assumed live edge.
        AssumedLiveEdges.insert(
            std::make_pair(I->getParent(), AliveSuccessor->getParent()));
        if (assumeLive(A, *AliveSuccessor->getParent()))
          Worklist.push_back(AliveSuccessor);
      }
    }
  }

  ToBeExploredFrom = std::move(NewToBeExploredFrom);

  // If we know everything is live there is no need to query for liveness.
  // Instead, indicating a pessimistic fixpoint will cause the state to be
  // "invalid" and all queries to be answered conservatively without lookups.
  // To be in this state we have to (1) have finished the exploration, (2) not
  // have discovered any non-trivial dead end, and (3) not have ruled
  // unreachable code dead.
  if (ToBeExploredFrom.empty() &&
      getAnchorScope()->size() == AssumedLiveBlocks.size() &&
      llvm::all_of(KnownDeadEnds, [](const Instruction *DeadEndI) {
        return DeadEndI->isTerminator() && DeadEndI->getNumSuccessors() == 0;
      }))
    return indicatePessimisticFixpoint();
  return Change;
}

/// Liveness information for a call site.
struct AAIsDeadCallSite final : AAIsDeadFunction {
  AAIsDeadCallSite(const IRPosition &IRP, Attributor &A)
      : AAIsDeadFunction(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// -------------------- Dereferenceable Argument Attribute --------------------

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 =
      clampStateAndIndicateChange(S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 = clampStateAndIndicateChange(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}
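
// Note (illustrative): the two sub-states are clamped independently, and
// operator| on ChangeStatus keeps CHANGED if either part changed. So if only
// the dereferenceable-bytes state of S had to be adjusted, CS0 is CHANGED,
// CS1 is UNCHANGED, and the combined result correctly reports CHANGED.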

struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP, Attributor &A)
      : AADereferenceable(IRP, A) {}
  using StateType = DerefState;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs, /* IgnoreSubsumingPositions */ false, &A);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    const IRPosition &IRP = this->getIRPosition();
    NonNullAA = &A.getAAFor<AANonNull>(*this, IRP,
                                       /* TrackDependence */ false);

    bool CanBeNull;
    takeKnownDerefBytesMaximum(
        IRP.getAssociatedValue().getPointerDereferenceableBytes(
            A.getDataLayout(), CanBeNull));

    bool IsFnInterface = IRP.isFnInterfaceKind();
    Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) {
      indicatePessimisticFixpoint();
      return;
    }

    if (Instruction *CtxI = getCtxI())
      followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  /// Helper function for collecting accessed bytes in must-be-executed-context
  void addAccessedBytesForUse(Attributor &A, const Use *U, const Instruction *I,
                              DerefState &State) {
    const Value *UseV = U->get();
    if (!UseV->getType()->isPointerTy())
      return;

    Type *PtrTy = UseV->getType();
    const DataLayout &DL = A.getDataLayout();
    int64_t Offset;
    if (const Value *Base = getBasePointerOfAccessPointerOperand(
            I, Offset, DL, /*AllowNonInbounds*/ true)) {
      if (Base == &getAssociatedValue() &&
          getPointerOperand(I, /* AllowVolatile */ false) == UseV) {
        uint64_t Size = DL.getTypeStoreSize(PtrTy->getPointerElementType());
        State.addAccessedBytes(Offset, Size);
      }
    }
  }

  /// See followUsesInMBEC
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AADereferenceable::StateType &State) {
    bool IsNonNull = false;
    bool TrackUse = false;
    int64_t DerefBytes = getKnownNonNullAndDerefBytesForUse(
        A, *this, getAssociatedValue(), U, I, IsNonNull, TrackUse);
    LLVM_DEBUG(dbgs() << "[AADereferenceable] Deref bytes: " << DerefBytes
                      << " for instruction " << *I << "\n");

    addAccessedBytesForUse(A, U, I, State);
    State.takeKnownDerefBytesMaximum(DerefBytes);
    return TrackUse;
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Change = AADereferenceable::manifest(A);
    if (isAssumedNonNull() && hasAttr(Attribute::DereferenceableOrNull)) {
      removeAttrs({Attribute::DereferenceableOrNull});
      return ChangeStatus::CHANGED;
    }
    return Change;
  }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
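
// Reading the state string above (illustrative): a pointer with 4 known and
// 8 assumed dereferenceable bytes that is also assumed non-null prints as
// "dereferenceable<4-8>"; if it may be null it prints as
// "dereferenceable_or_null<4-8>", matching the attribute kind chosen in
// getDeducedAttributes.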

/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP, Attributor &A)
      : AADereferenceableImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](const Value &V, const Instruction *, DerefState &T,
                            bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          stripAndAccumulateMinimalOffsets(A, *this, &V, DL, Offset, false);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = AA.getState();
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
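
// Worked example for the offset handling above (illustrative IR): for
//
//   %q = getelementptr inbounds i8, i8* %p, i64 16
//
// with %p dereferenceable(64), the traversal strips the GEP, accumulates
// Offset == 16, and credits %q with max(0, 64 - 16) = 48 assumed
// dereferenceable bytes.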

/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl> {
  AADereferenceableReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl>(
            IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable,
                                      AADereferenceableImpl> {
  using Base =
      AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AADereferenceableFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final
    : AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl> {
  using Base =
      AACallSiteReturnedFromReturned<AADereferenceable, AADereferenceableImpl>;
  AADereferenceableCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(dereferenceable);
  }
};
3680 
3681 // ------------------------ Align Argument Attribute ------------------------
3682 
getKnownAlignForUse(Attributor & A,AbstractAttribute & QueryingAA,Value & AssociatedValue,const Use * U,const Instruction * I,bool & TrackUse)3683 static unsigned getKnownAlignForUse(Attributor &A,
3684                                     AbstractAttribute &QueryingAA,
3685                                     Value &AssociatedValue, const Use *U,
3686                                     const Instruction *I, bool &TrackUse) {
3687   // We need to follow common pointer manipulation uses to the accesses they
3688   // feed into.
3689   if (isa<CastInst>(I)) {
3690     // Follow all but ptr2int casts.
3691     TrackUse = !isa<PtrToIntInst>(I);
3692     return 0;
3693   }
3694   if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3695     if (GEP->hasAllConstantIndices()) {
3696       TrackUse = true;
3697       return 0;
3698     }
3699   }
3700 
3701   MaybeAlign MA;
3702   if (const auto *CB = dyn_cast<CallBase>(I)) {
3703     if (CB->isBundleOperand(U) || CB->isCallee(U))
3704       return 0;
3705 
3706     unsigned ArgNo = CB->getArgOperandNo(U);
3707     IRPosition IRP = IRPosition::callsite_argument(*CB, ArgNo);
3708     // As long as we only use known information there is no need to track
3709     // dependences here.
3710     auto &AlignAA = A.getAAFor<AAAlign>(QueryingAA, IRP,
3711                                         /* TrackDependence */ false);
3712     MA = MaybeAlign(AlignAA.getKnownAlign());
3713   }
3714 
3715   const DataLayout &DL = A.getDataLayout();
3716   const Value *UseV = U->get();
3717   if (auto *SI = dyn_cast<StoreInst>(I)) {
3718     if (SI->getPointerOperand() == UseV)
3719       MA = SI->getAlign();
3720   } else if (auto *LI = dyn_cast<LoadInst>(I)) {
3721     if (LI->getPointerOperand() == UseV)
3722       MA = LI->getAlign();
3723   }
3724 
3725   if (!MA || *MA <= 1)
3726     return 0;
3727 
3728   unsigned Alignment = MA->value();
3729   int64_t Offset;
3730 
3731   if (const Value *Base = GetPointerBaseWithConstantOffset(UseV, Offset, DL)) {
3732     if (Base == &AssociatedValue) {
3733       // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3734       // So we can say that the maximum power of two which is a divisor of
3735       // gcd(Offset, Alignment) is an alignment.
3736 
3737       uint32_t gcd =
3738           greatestCommonDivisor(uint32_t(abs((int32_t)Offset)), Alignment);
3739       Alignment = llvm::PowerOf2Floor(gcd);
3740     }
3741   }
3742 
3743   return Alignment;
3744 }
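// A worked example of the gcd reasoning above (illustration only, with
// hypothetical numbers): assume a base pointer known to be 16-byte aligned
// that is accessed at constant offset 20. Then gcd(20, 16) == 4 and
// PowerOf2Floor(4) == 4, so the best alignment we can claim for Base + 20 is
// 4: the address is 16 * Q + 20 = 4 * (4 * Q + 5) for some integer Q, hence
// always divisible by 4 but not necessarily by any larger power of two.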
3745 
3746 struct AAAlignImpl : AAAlign {
3747   AAAlignImpl(const IRPosition &IRP, Attributor &A) : AAAlign(IRP, A) {}
3748 
3749   /// See AbstractAttribute::initialize(...).
3750   void initialize(Attributor &A) override {
3751     SmallVector<Attribute, 4> Attrs;
3752     getAttrs({Attribute::Alignment}, Attrs);
3753     for (const Attribute &Attr : Attrs)
3754       takeKnownMaximum(Attr.getValueAsInt());
3755 
3756     Value &V = getAssociatedValue();
3757     // TODO: This is a HACK to avoid having getPointerAlignment introduce a ptr2int
3758     //       use of the function pointer. This was caused by D73131. We want to
3759     //       avoid this for function pointers especially because we iterate
3760     //       their uses and int2ptr is not handled. It is not a correctness
3761     //       problem though!
3762     if (!V.getType()->getPointerElementType()->isFunctionTy())
3763       takeKnownMaximum(V.getPointerAlignment(A.getDataLayout()).value());
3764 
3765     if (getIRPosition().isFnInterfaceKind() &&
3766         (!getAnchorScope() ||
3767          !A.isFunctionIPOAmendable(*getAssociatedFunction()))) {
3768       indicatePessimisticFixpoint();
3769       return;
3770     }
3771 
3772     if (Instruction *CtxI = getCtxI())
3773       followUsesInMBEC(*this, A, getState(), *CtxI);
3774   }
3775 
3776   /// See AbstractAttribute::manifest(...).
3777   ChangeStatus manifest(Attributor &A) override {
3778     ChangeStatus LoadStoreChanged = ChangeStatus::UNCHANGED;
3779 
3780     // Check for users that allow alignment annotations.
3781     Value &AssociatedValue = getAssociatedValue();
3782     for (const Use &U : AssociatedValue.uses()) {
3783       if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
3784         if (SI->getPointerOperand() == &AssociatedValue)
3785           if (SI->getAlignment() < getAssumedAlign()) {
3786             STATS_DECLTRACK(AAAlign, Store,
3787                             "Number of times alignment added to a store");
3788             SI->setAlignment(Align(getAssumedAlign()));
3789             LoadStoreChanged = ChangeStatus::CHANGED;
3790           }
3791       } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
3792         if (LI->getPointerOperand() == &AssociatedValue)
3793           if (LI->getAlignment() < getAssumedAlign()) {
3794             LI->setAlignment(Align(getAssumedAlign()));
3795             STATS_DECLTRACK(AAAlign, Load,
3796                             "Number of times alignment added to a load");
3797             LoadStoreChanged = ChangeStatus::CHANGED;
3798           }
3799       }
3800     }
3801 
3802     ChangeStatus Changed = AAAlign::manifest(A);
3803 
3804     Align InheritAlign =
3805         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3806     if (InheritAlign >= getAssumedAlign())
3807       return LoadStoreChanged;
3808     return Changed | LoadStoreChanged;
3809   }
3810 
3811   // TODO: Provide a helper to determine the implied ABI alignment and check
3812   //       that value in the existing manifest method and a new one for
3813   //       AAAlignImpl, to avoid making the alignment explicit if it did not improve.
3814 
3815   /// See AbstractAttribute::getDeducedAttributes
3816   virtual void
3817   getDeducedAttributes(LLVMContext &Ctx,
3818                        SmallVectorImpl<Attribute> &Attrs) const override {
3819     if (getAssumedAlign() > 1)
3820       Attrs.emplace_back(
3821           Attribute::getWithAlignment(Ctx, Align(getAssumedAlign())));
3822   }
3823 
3824   /// See followUsesInMBEC
3825   bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
3826                        AAAlign::StateType &State) {
3827     bool TrackUse = false;
3828 
3829     unsigned int KnownAlign =
3830         getKnownAlignForUse(A, *this, getAssociatedValue(), U, I, TrackUse);
3831     State.takeKnownMaximum(KnownAlign);
3832 
3833     return TrackUse;
3834   }
3835 
3836   /// See AbstractAttribute::getAsStr().
3837   const std::string getAsStr() const override {
3838     return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
3839                                 "-" + std::to_string(getAssumedAlign()) + ">")
3840                              : "unknown-align";
3841   }
3842 };
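// For illustration, manifesting an assumed alignment of 16 for a hypothetical
// pointer %p rewrites the accesses visited above, e.g.,
//
//   store i32 0, i32* %p     ->  store i32 0, i32* %p, align 16
//   %v = load i32, i32* %p   ->  %v = load i32, i32* %p, align 16
//
// in addition to the `align 16` attribute that getDeducedAttributes attaches
// to the IR position itself.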
3843 
3844 /// Align attribute for a floating value.
3845 struct AAAlignFloating : AAAlignImpl {
3846   AAAlignFloating(const IRPosition &IRP, Attributor &A) : AAAlignImpl(IRP, A) {}
3847 
3848   /// See AbstractAttribute::updateImpl(...).
3849   ChangeStatus updateImpl(Attributor &A) override {
3850     const DataLayout &DL = A.getDataLayout();
3851 
3852     auto VisitValueCB = [&](Value &V, const Instruction *,
3853                             AAAlign::StateType &T, bool Stripped) -> bool {
3854       const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
3855       if (!Stripped && this == &AA) {
3856         int64_t Offset;
3857         unsigned Alignment = 1;
3858         if (const Value *Base =
3859                 GetPointerBaseWithConstantOffset(&V, Offset, DL)) {
3860           Align PA = Base->getPointerAlignment(DL);
3861           // BasePointerAddr + Offset = Alignment * Q for some integer Q.
3862           // So we can say that the maximum power of two which is a divisor of
3863           // gcd(Offset, Alignment) is an alignment.
3864 
3865           uint32_t gcd = greatestCommonDivisor(uint32_t(abs((int32_t)Offset)),
3866                                                uint32_t(PA.value()));
3867           Alignment = llvm::PowerOf2Floor(gcd);
3868         } else {
3869           Alignment = V.getPointerAlignment(DL).value();
3870         }
3871         // Use only IR information if we did not strip anything.
3872         T.takeKnownMaximum(Alignment);
3873         T.indicatePessimisticFixpoint();
3874       } else {
3875         // Use abstract attribute information.
3876         const AAAlign::StateType &DS = AA.getState();
3877         T ^= DS;
3878       }
3879       return T.isValidState();
3880     };
3881 
3882     StateType T;
3883     if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
3884                                                    VisitValueCB, getCtxI()))
3885       return indicatePessimisticFixpoint();
3886 
3887     // TODO: If we know we visited all incoming values, and thus none are
3888     // assumed dead, we can take the known information from the state T.
3889     return clampStateAndIndicateChange(getState(), T);
3890   }
3891 
3892   /// See AbstractAttribute::trackStatistics()
3893   void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
3894 };
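// The traversal above joins alignment information over all underlying values.
// For a hypothetical select of two pointers with assumed alignments 16 and 8,
// any sound combined assumed alignment is at most 8; only when no further
// stripping is possible do we fall back to plain IR information and fix the
// state pessimistically.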
3895 
3896 /// Align attribute for function return value.
3897 struct AAAlignReturned final
3898     : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
3899   using Base = AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>;
3900   AAAlignReturned(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3901 
3902   /// See AbstractAttribute::initialize(...).
3903   void initialize(Attributor &A) override {
3904     Base::initialize(A);
3905     Function *F = getAssociatedFunction();
3906     if (!F || F->isDeclaration())
3907       indicatePessimisticFixpoint();
3908   }
3909 
3910   /// See AbstractAttribute::trackStatistics()
3911   void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
3912 };
3913 
3914 /// Align attribute for function argument.
3915 struct AAAlignArgument final
3916     : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
3917   using Base = AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>;
3918   AAAlignArgument(const IRPosition &IRP, Attributor &A) : Base(IRP, A) {}
3919 
3920   /// See AbstractAttribute::manifest(...).
3921   ChangeStatus manifest(Attributor &A) override {
3922     // If the associated argument is involved in a must-tail call we give up
3923     // because we would need to keep the argument alignments of caller and
3924     // callee in-sync. Just does not seem worth the trouble right now.
3925     if (A.getInfoCache().isInvolvedInMustTailCall(*getAssociatedArgument()))
3926       return ChangeStatus::UNCHANGED;
3927     return Base::manifest(A);
3928   }
3929 
3930   /// See AbstractAttribute::trackStatistics()
3931   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
3932 };
3933 
3934 struct AAAlignCallSiteArgument final : AAAlignFloating {
3935   AAAlignCallSiteArgument(const IRPosition &IRP, Attributor &A)
3936       : AAAlignFloating(IRP, A) {}
3937 
3938   /// See AbstractAttribute::manifest(...).
3939   ChangeStatus manifest(Attributor &A) override {
3940     // If the associated argument is involved in a must-tail call we give up
3941     // because we would need to keep the argument alignments of caller and
3942     // callee in-sync. Just does not seem worth the trouble right now.
3943     if (Argument *Arg = getAssociatedArgument())
3944       if (A.getInfoCache().isInvolvedInMustTailCall(*Arg))
3945         return ChangeStatus::UNCHANGED;
3946     ChangeStatus Changed = AAAlignImpl::manifest(A);
3947     Align InheritAlign =
3948         getAssociatedValue().getPointerAlignment(A.getDataLayout());
3949     if (InheritAlign >= getAssumedAlign())
3950       Changed = ChangeStatus::UNCHANGED;
3951     return Changed;
3952   }
3953 
3954   /// See AbstractAttribute::updateImpl(Attributor &A).
3955   ChangeStatus updateImpl(Attributor &A) override {
3956     ChangeStatus Changed = AAAlignFloating::updateImpl(A);
3957     if (Argument *Arg = getAssociatedArgument()) {
3958       // We only take known information from the argument
3959       // so we do not need to track a dependence.
3960       const auto &ArgAlignAA = A.getAAFor<AAAlign>(
3961           *this, IRPosition::argument(*Arg), /* TrackDependence */ false);
3962       takeKnownMaximum(ArgAlignAA.getKnownAlign());
3963     }
3964     return Changed;
3965   }
3966 
3967   /// See AbstractAttribute::trackStatistics()
3968   void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
3969 };
3970 
3971 /// Align attribute deduction for a call site return value.
3972 struct AAAlignCallSiteReturned final
3973     : AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl> {
3974   using Base = AACallSiteReturnedFromReturned<AAAlign, AAAlignImpl>;
3975   AAAlignCallSiteReturned(const IRPosition &IRP, Attributor &A)
3976       : Base(IRP, A) {}
3977 
3978   /// See AbstractAttribute::initialize(...).
3979   void initialize(Attributor &A) override {
3980     Base::initialize(A);
3981     Function *F = getAssociatedFunction();
3982     if (!F || F->isDeclaration())
3983       indicatePessimisticFixpoint();
3984   }
3985 
3986   /// See AbstractAttribute::trackStatistics()
3987   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
3988 };
3989 
3990 /// ------------------ Function No-Return Attribute ----------------------------
3991 struct AANoReturnImpl : public AANoReturn {
3992   AANoReturnImpl(const IRPosition &IRP, Attributor &A) : AANoReturn(IRP, A) {}
3993 
3994   /// See AbstractAttribute::initialize(...).
3995   void initialize(Attributor &A) override {
3996     AANoReturn::initialize(A);
3997     Function *F = getAssociatedFunction();
3998     if (!F || F->isDeclaration())
3999       indicatePessimisticFixpoint();
4000   }
4001 
4002   /// See AbstractAttribute::getAsStr().
4003   const std::string getAsStr() const override {
4004     return getAssumed() ? "noreturn" : "may-return";
4005   }
4006 
4007   /// See AbstractAttribute::updateImpl(Attributor &A).
4008   virtual ChangeStatus updateImpl(Attributor &A) override {
4009     auto CheckForNoReturn = [](Instruction &) { return false; };
4010     if (!A.checkForAllInstructions(CheckForNoReturn, *this,
4011                                    {(unsigned)Instruction::Ret}))
4012       return indicatePessimisticFixpoint();
4013     return ChangeStatus::UNCHANGED;
4014   }
4015 };
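// The update rule above is deliberately simple: CheckForNoReturn rejects every
// instruction, so checkForAllInstructions over the `ret` opcode can only
// succeed if no live return instruction exists. A hypothetical
//
//   void f() { while (true) g(); }   // no reachable `ret`
//
// keeps the optimistic noreturn assumption, while any function with a live
// `ret` reaches the pessimistic fixpoint.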
4016 
4017 struct AANoReturnFunction final : AANoReturnImpl {
4018   AANoReturnFunction(const IRPosition &IRP, Attributor &A)
4019       : AANoReturnImpl(IRP, A) {}
4020 
4021   /// See AbstractAttribute::trackStatistics()
4022   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
4023 };
4024 
4025 /// NoReturn attribute deduction for a call site.
4026 struct AANoReturnCallSite final : AANoReturnImpl {
4027   AANoReturnCallSite(const IRPosition &IRP, Attributor &A)
4028       : AANoReturnImpl(IRP, A) {}
4029 
4030   /// See AbstractAttribute::initialize(...).
4031   void initialize(Attributor &A) override {
4032     AANoReturnImpl::initialize(A);
4033     if (Function *F = getAssociatedFunction()) {
4034       const IRPosition &FnPos = IRPosition::function(*F);
4035       auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4036       if (!FnAA.isAssumedNoReturn())
4037         indicatePessimisticFixpoint();
4038     }
4039   }
4040 
4041   /// See AbstractAttribute::updateImpl(...).
4042   ChangeStatus updateImpl(Attributor &A) override {
4043     // TODO: Once we have call site specific value information we can provide
4044     //       call site specific liveness information and then it makes
4045     //       sense to specialize attributes for call site arguments instead of
4046     //       redirecting requests to the callee argument.
4047     Function *F = getAssociatedFunction();
4048     const IRPosition &FnPos = IRPosition::function(*F);
4049     auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
4050     return clampStateAndIndicateChange(getState(), FnAA.getState());
4051   }
4052 
4053   /// See AbstractAttribute::trackStatistics()
4054   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
4055 };
4056 
4057 /// ----------------------- Variable Capturing ---------------------------------
4058 
4059 /// A class to hold the state for no-capture attributes.
4060 struct AANoCaptureImpl : public AANoCapture {
4061   AANoCaptureImpl(const IRPosition &IRP, Attributor &A) : AANoCapture(IRP, A) {}
4062 
4063   /// See AbstractAttribute::initialize(...).
4064   void initialize(Attributor &A) override {
4065     if (hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ true)) {
4066       indicateOptimisticFixpoint();
4067       return;
4068     }
4069     Function *AnchorScope = getAnchorScope();
4070     if (isFnInterfaceKind() &&
4071         (!AnchorScope || !A.isFunctionIPOAmendable(*AnchorScope))) {
4072       indicatePessimisticFixpoint();
4073       return;
4074     }
4075 
4076     // You cannot "capture" null in the default address space.
4077     if (isa<ConstantPointerNull>(getAssociatedValue()) &&
4078         getAssociatedValue().getType()->getPointerAddressSpace() == 0) {
4079       indicateOptimisticFixpoint();
4080       return;
4081     }
4082 
4083     const Function *F =
4084         isArgumentPosition() ? getAssociatedFunction() : AnchorScope;
4085 
4086     // Check what state the associated function can actually capture.
4087     if (F)
4088       determineFunctionCaptureCapabilities(getIRPosition(), *F, *this);
4089     else
4090       indicatePessimisticFixpoint();
4091   }
4092 
4093   /// See AbstractAttribute::updateImpl(...).
4094   ChangeStatus updateImpl(Attributor &A) override;
4095 
4096   /// See AbstractAttribute::getDeducedAttributes(...).
4097   virtual void
4098   getDeducedAttributes(LLVMContext &Ctx,
4099                        SmallVectorImpl<Attribute> &Attrs) const override {
4100     if (!isAssumedNoCaptureMaybeReturned())
4101       return;
4102 
4103     if (isArgumentPosition()) {
4104       if (isAssumedNoCapture())
4105         Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
4106       else if (ManifestInternal)
4107         Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
4108     }
4109   }
4110 
4111   /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
4112   /// depending on the ability of the function associated with \p IRP to capture
4113   /// state in memory and through "returning/throwing", respectively.
4114   static void determineFunctionCaptureCapabilities(const IRPosition &IRP,
4115                                                    const Function &F,
4116                                                    BitIntegerState &State) {
4117     // TODO: Once we have memory behavior attributes we should use them here.
4118 
4119     // If we know we cannot communicate or write to memory, we do not care about
4120     // ptr2int anymore.
4121     if (F.onlyReadsMemory() && F.doesNotThrow() &&
4122         F.getReturnType()->isVoidTy()) {
4123       State.addKnownBits(NO_CAPTURE);
4124       return;
4125     }
4126 
4127     // A function cannot capture state in memory if it only reads memory; it
4128     // can, however, return/throw state, and that state might be influenced by
4129     // the pointer value, e.g., loading from a returned pointer might reveal a bit.
4130     if (F.onlyReadsMemory())
4131       State.addKnownBits(NOT_CAPTURED_IN_MEM);
4132 
4133     // A function cannot communicate state back if it does not throw
4134     // exceptions and does not return values.
4135     if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
4136       State.addKnownBits(NOT_CAPTURED_IN_RET);
4137 
4138     // Check existing "returned" attributes.
4139     int ArgNo = IRP.getCalleeArgNo();
4140     if (F.doesNotThrow() && ArgNo >= 0) {
4141       for (unsigned u = 0, e = F.arg_size(); u < e; ++u)
4142         if (F.hasParamAttribute(u, Attribute::Returned)) {
4143           if (u == unsigned(ArgNo))
4144             State.removeAssumedBits(NOT_CAPTURED_IN_RET);
4145           else if (F.onlyReadsMemory())
4146             State.addKnownBits(NO_CAPTURE);
4147           else
4148             State.addKnownBits(NOT_CAPTURED_IN_RET);
4149           break;
4150         }
4151     }
4152   }
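// As a sketch of the capability logic above, consider a hypothetical callee
//
//   declare void @use(i8* %p) readonly nounwind
//
// It only reads memory, cannot unwind, and returns void, so neither memory nor
// return/throw can transport %p out and NO_CAPTURE is known up-front. If @use
// instead returned an i8*, only NOT_CAPTURED_IN_MEM could be recorded, since
// the pointer might still escape through the return value.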
4153 
4154   /// See AbstractState::getAsStr().
4155   const std::string getAsStr() const override {
4156     if (isKnownNoCapture())
4157       return "known not-captured";
4158     if (isAssumedNoCapture())
4159       return "assumed not-captured";
4160     if (isKnownNoCaptureMaybeReturned())
4161       return "known not-captured-maybe-returned";
4162     if (isAssumedNoCaptureMaybeReturned())
4163       return "assumed not-captured-maybe-returned";
4164     return "assumed-captured";
4165   }
4166 };
4167 
4168 /// Attributor-aware capture tracker.
4169 struct AACaptureUseTracker final : public CaptureTracker {
4170 
4171   /// Create a capture tracker that can lookup in-flight abstract attributes
4172   /// through the Attributor \p A.
4173   ///
4174   /// If a use leads to a potential capture, \p CapturedInMemory is set and the
4175   /// search is stopped. If a use leads to a return instruction,
4176   /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
4177   /// If a use leads to a ptr2int which may capture the value,
4178   /// \p CapturedInInteger is set. If a use is found that is currently assumed
4179   /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
4180   /// set. All values in \p PotentialCopies are later tracked as well. For every
4181   /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
4182   /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
4183   /// conservatively set to true.
4184   AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
4185                       const AAIsDead &IsDeadAA, AANoCapture::StateType &State,
4186                       SmallVectorImpl<const Value *> &PotentialCopies,
4187                       unsigned &RemainingUsesToExplore)
4188       : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
4189         PotentialCopies(PotentialCopies),
4190         RemainingUsesToExplore(RemainingUsesToExplore) {}
4191 
4192   /// Determine if \p V may be captured. *Also updates the state!*
4193   bool valueMayBeCaptured(const Value *V) {
4194     if (V->getType()->isPointerTy()) {
4195       PointerMayBeCaptured(V, this);
4196     } else {
4197       State.indicatePessimisticFixpoint();
4198     }
4199     return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4200   }
4201 
4202   /// See CaptureTracker::tooManyUses().
4203   void tooManyUses() override {
4204     State.removeAssumedBits(AANoCapture::NO_CAPTURE);
4205   }
4206 
4207   bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
4208     if (CaptureTracker::isDereferenceableOrNull(O, DL))
4209       return true;
4210     const auto &DerefAA = A.getAAFor<AADereferenceable>(
4211         NoCaptureAA, IRPosition::value(*O), /* TrackDependence */ true,
4212         DepClassTy::OPTIONAL);
4213     return DerefAA.getAssumedDereferenceableBytes();
4214   }
4215 
4216   /// See CaptureTracker::captured(...).
4217   bool captured(const Use *U) override {
4218     Instruction *UInst = cast<Instruction>(U->getUser());
4219     LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
4220                       << "\n");
4221 
4222     // Because we may reuse the tracker multiple times we keep track of the
4223     // number of explored uses ourselves as well.
4224     if (RemainingUsesToExplore-- == 0) {
4225       LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
4226       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4227                           /* Return */ true);
4228     }
4229 
4230     // Deal with ptr2int by following uses.
4231     if (isa<PtrToIntInst>(UInst)) {
4232       LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
4233       return valueMayBeCaptured(UInst);
4234     }
4235 
4236     // Explicitly catch return instructions.
4237     if (isa<ReturnInst>(UInst))
4238       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4239                           /* Return */ true);
4240 
4241     // For now we only use special logic for call sites. However, the tracker
4242     // itself knows about a lot of other non-capturing cases already.
4243     auto *CB = dyn_cast<CallBase>(UInst);
4244     if (!CB || !CB->isArgOperand(U))
4245       return isCapturedIn(/* Memory */ true, /* Integer */ true,
4246                           /* Return */ true);
4247 
4248     unsigned ArgNo = CB->getArgOperandNo(U);
4249     const IRPosition &CSArgPos = IRPosition::callsite_argument(*CB, ArgNo);
4250     // If we have an abstract no-capture attribute for the argument we can use
4251     // it to justify a no-capture attribute here. This allows recursion!
4252     auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
4253     if (ArgNoCaptureAA.isAssumedNoCapture())
4254       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4255                           /* Return */ false);
4256     if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
4257       addPotentialCopy(*CB);
4258       return isCapturedIn(/* Memory */ false, /* Integer */ false,
4259                           /* Return */ false);
4260     }
4261 
4262     // Lastly, we could not find a reason to assume no-capture, so we don't.
4263     return isCapturedIn(/* Memory */ true, /* Integer */ true,
4264                         /* Return */ true);
4265   }
4266 
4267   /// Register \p CB as a potential copy of the value we are checking.
4268   void addPotentialCopy(CallBase &CB) { PotentialCopies.push_back(&CB); }
4269 
4270   /// See CaptureTracker::shouldExplore(...).
4271   bool shouldExplore(const Use *U) override {
4272     // Check liveness and ignore droppable users.
4273     return !U->getUser()->isDroppable() &&
4274            !A.isAssumedDead(*U, &NoCaptureAA, &IsDeadAA);
4275   }
4276 
4277   /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
4278   /// \p CapturedInRet, then return the appropriate value for use in the
4279   /// CaptureTracker::captured() interface.
4280   bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
4281                     bool CapturedInRet) {
4282     LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
4283                       << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
4284     if (CapturedInMem)
4285       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
4286     if (CapturedInInt)
4287       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
4288     if (CapturedInRet)
4289       State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
4290     return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
4291   }
4292 
4293 private:
4294   /// The attributor providing in-flight abstract attributes.
4295   Attributor &A;
4296 
4297   /// The abstract attribute currently updated.
4298   AANoCapture &NoCaptureAA;
4299 
4300   /// The abstract liveness state.
4301   const AAIsDead &IsDeadAA;
4302 
4303   /// The state currently updated.
4304   AANoCapture::StateType &State;
4305 
4306   /// Set of potential copies of the tracked value.
4307   SmallVectorImpl<const Value *> &PotentialCopies;
4308 
4309   /// Global counter to limit the number of explored uses.
4310   unsigned &RemainingUsesToExplore;
4311 };
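// As an example of how the tracker classifies uses, consider a hypothetical
// pointer %p:
//
//   store i8* %p, i8** @g       ; captured in memory
//   %i = ptrtoint i8* %p to i64 ; non-pointer result, assume the worst
//   ret i8* %p                  ; communicated back, not captured in memory
//   call void @f(i8* %p)        ; decided by AANoCapture on @f's argument
//
// Each classification clears the corresponding NOT_CAPTURED_* bits through
// isCapturedIn above.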
4312 
4313 ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
4314   const IRPosition &IRP = getIRPosition();
4315   const Value *V = isArgumentPosition() ? IRP.getAssociatedArgument()
4316                                         : &IRP.getAssociatedValue();
4317   if (!V)
4318     return indicatePessimisticFixpoint();
4319 
4320   const Function *F =
4321       isArgumentPosition() ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
4322   assert(F && "Expected a function!");
4323   const IRPosition &FnPos = IRPosition::function(*F);
4324   const auto &IsDeadAA =
4325       A.getAAFor<AAIsDead>(*this, FnPos, /* TrackDependence */ false);
4326 
4327   AANoCapture::StateType T;
4328 
4329   // Readonly means we cannot capture through memory.
4330   const auto &FnMemAA =
4331       A.getAAFor<AAMemoryBehavior>(*this, FnPos, /* TrackDependence */ false);
4332   if (FnMemAA.isAssumedReadOnly()) {
4333     T.addKnownBits(NOT_CAPTURED_IN_MEM);
4334     if (FnMemAA.isKnownReadOnly())
4335       addKnownBits(NOT_CAPTURED_IN_MEM);
4336     else
4337       A.recordDependence(FnMemAA, *this, DepClassTy::OPTIONAL);
4338   }
4339 
4340   // Make sure all returned values are different from the underlying value.
4341   // TODO: we could do this in a more sophisticated way inside
4342   //       AAReturnedValues, e.g., track all values that escape through returns
4343   //       directly somehow.
4344   auto CheckReturnedArgs = [&](const AAReturnedValues &RVAA) {
4345     bool SeenConstant = false;
4346     for (auto &It : RVAA.returned_values()) {
4347       if (isa<Constant>(It.first)) {
4348         if (SeenConstant)
4349           return false;
4350         SeenConstant = true;
4351       } else if (!isa<Argument>(It.first) ||
4352                  It.first == getAssociatedArgument())
4353         return false;
4354     }
4355     return true;
4356   };
4357 
4358   const auto &NoUnwindAA = A.getAAFor<AANoUnwind>(
4359       *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
4360   if (NoUnwindAA.isAssumedNoUnwind()) {
4361     bool IsVoidTy = F->getReturnType()->isVoidTy();
4362     const AAReturnedValues *RVAA =
4363         IsVoidTy ? nullptr
4364                  : &A.getAAFor<AAReturnedValues>(*this, FnPos,
4365                                                  /* TrackDependence */ true,
4366                                                  DepClassTy::OPTIONAL);
4367     if (IsVoidTy || CheckReturnedArgs(*RVAA)) {
4368       T.addKnownBits(NOT_CAPTURED_IN_RET);
4369       if (T.isKnown(NOT_CAPTURED_IN_MEM))
4370         return ChangeStatus::UNCHANGED;
4371       if (NoUnwindAA.isKnownNoUnwind() &&
4372           (IsVoidTy || RVAA->getState().isAtFixpoint())) {
4373         addKnownBits(NOT_CAPTURED_IN_RET);
4374         if (isKnown(NOT_CAPTURED_IN_MEM))
4375           return indicateOptimisticFixpoint();
4376       }
4377     }
4378   }
4379 
4380   // Use the CaptureTracker interface and logic with the specialized tracker,
4381   // defined in AACaptureUseTracker, that can look at in-flight abstract
4382   // attributes and directly update the assumed state.
4383   SmallVector<const Value *, 4> PotentialCopies;
4384   unsigned RemainingUsesToExplore =
4385       getDefaultMaxUsesToExploreForCaptureTracking();
4386   AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
4387                               RemainingUsesToExplore);
4388 
4389   // Check all potential copies of the associated value until we can assume
4390   // none will be captured or we have to assume at least one might be.
4391   unsigned Idx = 0;
4392   PotentialCopies.push_back(V);
4393   while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
4394     Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);
4395 
4396   AANoCapture::StateType &S = getState();
4397   auto Assumed = S.getAssumed();
4398   S.intersectAssumedBits(T.getAssumed());
4399   if (!isAssumedNoCaptureMaybeReturned())
4400     return indicatePessimisticFixpoint();
4401   return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
4402                                    : ChangeStatus::CHANGED;
4403 }
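// Note the worklist in the update above: every call that is only assumed
// "no-capture-maybe-returned" for the passed pointer adds the call site as a
// potential copy, e.g., for a hypothetical
//
//   %q = call i8* @identity(i8* %p)
//
// the uses of %q are explored with the same tracker before no-capture can be
// concluded for %p.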
4404 
4405 /// NoCapture attribute for function arguments.
4406 struct AANoCaptureArgument final : AANoCaptureImpl {
4407   AANoCaptureArgument(const IRPosition &IRP, Attributor &A)
4408       : AANoCaptureImpl(IRP, A) {}
4409 
4410   /// See AbstractAttribute::trackStatistics()
4411   void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
4412 };
4413 
4414 /// NoCapture attribute for call site arguments.
4415 struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
4416   AANoCaptureCallSiteArgument(const IRPosition &IRP, Attributor &A)
4417       : AANoCaptureImpl(IRP, A) {}
4418 
4419   /// See AbstractAttribute::initialize(...).
4420   void initialize(Attributor &A) override {
4421     if (Argument *Arg = getAssociatedArgument())
4422       if (Arg->hasByValAttr())
4423         indicateOptimisticFixpoint();
4424     AANoCaptureImpl::initialize(A);
4425   }
4426 
4427   /// See AbstractAttribute::updateImpl(...).
4428   ChangeStatus updateImpl(Attributor &A) override {
4429     // TODO: Once we have call site specific value information we can provide
4430     //       call site specific liveness information and then it makes
4431     //       sense to specialize attributes for call site arguments instead of
4432     //       redirecting requests to the callee argument.
4433     Argument *Arg = getAssociatedArgument();
4434     if (!Arg)
4435       return indicatePessimisticFixpoint();
4436     const IRPosition &ArgPos = IRPosition::argument(*Arg);
4437     auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
4438     return clampStateAndIndicateChange(getState(), ArgAA.getState());
4439   }
4440 
4441   /// See AbstractAttribute::trackStatistics()
4442   void trackStatistics() const override{STATS_DECLTRACK_CSARG_ATTR(nocapture)};
4443 };
4444 
4445 /// NoCapture attribute for floating values.
4446 struct AANoCaptureFloating final : AANoCaptureImpl {
4447   AANoCaptureFloating(const IRPosition &IRP, Attributor &A)
4448       : AANoCaptureImpl(IRP, A) {}
4449 
4450   /// See AbstractAttribute::trackStatistics()
4451   void trackStatistics() const override {
4452     STATS_DECLTRACK_FLOATING_ATTR(nocapture)
4453   }
4454 };
4455 
4456 /// NoCapture attribute for function return value.
4457 struct AANoCaptureReturned final : AANoCaptureImpl {
4458   AANoCaptureReturned(const IRPosition &IRP, Attributor &A)
4459       : AANoCaptureImpl(IRP, A) {
4460     llvm_unreachable("NoCapture is not applicable to function returns!");
4461   }
4462 
4463   /// See AbstractAttribute::initialize(...).
4464   void initialize(Attributor &A) override {
4465     llvm_unreachable("NoCapture is not applicable to function returns!");
4466   }
4467 
4468   /// See AbstractAttribute::updateImpl(...).
4469   ChangeStatus updateImpl(Attributor &A) override {
4470     llvm_unreachable("NoCapture is not applicable to function returns!");
4471   }
4472 
4473   /// See AbstractAttribute::trackStatistics()
4474   void trackStatistics() const override {}
4475 };
4476 
4477 /// NoCapture attribute deduction for a call site return value.
4478 struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
4479   AANoCaptureCallSiteReturned(const IRPosition &IRP, Attributor &A)
4480       : AANoCaptureImpl(IRP, A) {}
4481 
4482   /// See AbstractAttribute::trackStatistics()
4483   void trackStatistics() const override {
4484     STATS_DECLTRACK_CSRET_ATTR(nocapture)
4485   }
4486 };
4487 
4488 /// ------------------ Value Simplify Attribute ----------------------------
4489 struct AAValueSimplifyImpl : AAValueSimplify {
4490   AAValueSimplifyImpl(const IRPosition &IRP, Attributor &A)
4491       : AAValueSimplify(IRP, A) {}
4492 
4493   /// See AbstractAttribute::initialize(...).
4494   void initialize(Attributor &A) override {
4495     if (getAssociatedValue().getType()->isVoidTy())
4496       indicatePessimisticFixpoint();
4497   }
4498 
4499   /// See AbstractAttribute::getAsStr().
4500   const std::string getAsStr() const override {
4501     return getAssumed() ? (getKnown() ? "simplified" : "maybe-simple")
4502                         : "not-simple";
4503   }
4504 
4505   /// See AbstractAttribute::trackStatistics()
4506   void trackStatistics() const override {}
4507 
4508   /// See AAValueSimplify::getAssumedSimplifiedValue()
4509   Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const override {
4510     if (!getAssumed())
4511       return const_cast<Value *>(&getAssociatedValue());
4512     return SimplifiedAssociatedValue;
4513   }
4514 
4515   /// Helper function for querying AAValueSimplify and updating the candidate.
4516   /// \param QueryingValue Value trying to unify with SimplifiedValue
4517   /// \param AccumulatedSimplifiedValue Current simplification result.
4518   static bool checkAndUpdate(Attributor &A, const AbstractAttribute &QueryingAA,
4519                              Value &QueryingValue,
4520                              Optional<Value *> &AccumulatedSimplifiedValue) {
4521     // FIXME: Add typecast support.
4522 
4523     auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
4524         QueryingAA, IRPosition::value(QueryingValue));
4525 
4526     Optional<Value *> QueryingValueSimplified =
4527         ValueSimplifyAA.getAssumedSimplifiedValue(A);
4528 
4529     if (!QueryingValueSimplified.hasValue())
4530       return true;
4531 
4532     if (!QueryingValueSimplified.getValue())
4533       return false;
4534 
4535     Value &QueryingValueSimplifiedUnwrapped =
4536         *QueryingValueSimplified.getValue();
4537 
4538     if (AccumulatedSimplifiedValue.hasValue() &&
4539         !isa<UndefValue>(AccumulatedSimplifiedValue.getValue()) &&
4540         !isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4541       return AccumulatedSimplifiedValue == QueryingValueSimplified;
4542     if (AccumulatedSimplifiedValue.hasValue() &&
4543         isa<UndefValue>(QueryingValueSimplifiedUnwrapped))
4544       return true;
4545 
4546     LLVM_DEBUG(dbgs() << "[ValueSimplify] " << QueryingValue
4547                       << " is assumed to be "
4548                       << QueryingValueSimplifiedUnwrapped << "\n");
4549 
4550     AccumulatedSimplifiedValue = QueryingValueSimplified;
4551     return true;
4552   }
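  // The unification above behaves like a small lattice join (a sketch, using
  // hypothetical incoming values): if one queried position contributes `undef`
  // and another contributes `i32 7`, the accumulated value settles on `i32 7`;
  // if two positions contribute `i32 7` and `i32 9`, unification fails and no
  // single simplified value exists; and while a queried position is still
  // unresolved (None), we optimistically keep going.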
4553 
4554   /// Returns whether a candidate was found.
4555   template <typename AAType> bool askSimplifiedValueFor(Attributor &A) {
4556     if (!getAssociatedValue().getType()->isIntegerTy())
4557       return false;
4558 
4559     const auto &AA =
4560         A.getAAFor<AAType>(*this, getIRPosition(), /* TrackDependence */ false);
4561 
4562     Optional<ConstantInt *> COpt = AA.getAssumedConstantInt(A);
4563 
4564     if (!COpt.hasValue()) {
4565       SimplifiedAssociatedValue = llvm::None;
4566       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4567       return true;
4568     }
4569     if (auto *C = COpt.getValue()) {
4570       SimplifiedAssociatedValue = C;
4571       A.recordDependence(AA, *this, DepClassTy::OPTIONAL);
4572       return true;
4573     }
4574     return false;
4575   }
4576 
4577   bool askSimplifiedValueForOtherAAs(Attributor &A) {
4578     if (askSimplifiedValueFor<AAValueConstantRange>(A))
4579       return true;
4580     if (askSimplifiedValueFor<AAPotentialValues>(A))
4581       return true;
4582     return false;
4583   }
4584 
4585   /// See AbstractAttribute::manifest(...).
4586   ChangeStatus manifest(Attributor &A) override {
4587     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4588 
4589     if (SimplifiedAssociatedValue.hasValue() &&
4590         !SimplifiedAssociatedValue.getValue())
4591       return Changed;
4592 
4593     Value &V = getAssociatedValue();
4594     auto *C = SimplifiedAssociatedValue.hasValue()
4595                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4596                   : UndefValue::get(V.getType());
4597     if (C) {
4598       // We can replace the AssociatedValue with the constant.
4599       if (!V.user_empty() && &V != C && V.getType() == C->getType()) {
4600         LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *C
4601                           << " :: " << *this << "\n");
4602         if (A.changeValueAfterManifest(V, *C))
4603           Changed = ChangeStatus::CHANGED;
4604       }
4605     }
4606 
4607     return Changed | AAValueSimplify::manifest(A);
4608   }
4609 
4610   /// See AbstractState::indicatePessimisticFixpoint(...).
4611   ChangeStatus indicatePessimisticFixpoint() override {
4612     // NOTE: The associated value will be returned in a pessimistic fixpoint
4613     // and is regarded as known. That's why `indicateOptimisticFixpoint` is called.
4614     SimplifiedAssociatedValue = &getAssociatedValue();
4615     indicateOptimisticFixpoint();
4616     return ChangeStatus::CHANGED;
4617   }
4618 
4619 protected:
4620   // An assumed simplified value. Initially, it is set to Optional::None, which
4621   // means that the value is not clear under the current assumption. If in the
4622   // pessimistic state, getAssumedSimplifiedValue doesn't return this value but
4623   // returns the original associated value.
4624   Optional<Value *> SimplifiedAssociatedValue;
4625 };
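// Manifesting uses the unified value: an unresolved (None) simplification is
// materialized as `undef`, while a constant of matching type replaces all uses
// of the associated value. E.g., a hypothetical
//
//   %x = phi i32 [ 3, %bb0 ], [ 3, %bb1 ]
//
// whose incoming values unified to `i32 3` has every use of %x rewritten to
// use the constant directly.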
4626 
4627 struct AAValueSimplifyArgument final : AAValueSimplifyImpl {
4628   AAValueSimplifyArgument(const IRPosition &IRP, Attributor &A)
4629       : AAValueSimplifyImpl(IRP, A) {}
4630 
4631   void initialize(Attributor &A) override {
4632     AAValueSimplifyImpl::initialize(A);
4633     if (!getAnchorScope() || getAnchorScope()->isDeclaration())
4634       indicatePessimisticFixpoint();
4635     if (hasAttr({Attribute::InAlloca, Attribute::Preallocated,
4636                  Attribute::StructRet, Attribute::Nest},
4637                 /* IgnoreSubsumingPositions */ true))
4638       indicatePessimisticFixpoint();
4639 
4640     // FIXME: This is a hack to prevent us from propagating function pointers in
4641     // the new pass manager CGSCC pass as it creates call edges the
4642     // CallGraphUpdater cannot handle yet.
4643     Value &V = getAssociatedValue();
4644     if (V.getType()->isPointerTy() &&
4645         V.getType()->getPointerElementType()->isFunctionTy() &&
4646         !A.isModulePass())
4647       indicatePessimisticFixpoint();
4648   }
4649 
4650   /// See AbstractAttribute::updateImpl(...).
4651   ChangeStatus updateImpl(Attributor &A) override {
4652     // Byval is only replaceable if it is readonly; otherwise we would write into
4653     // the replaced value and not the copy that byval creates implicitly.
4654     Argument *Arg = getAssociatedArgument();
4655     if (Arg->hasByValAttr()) {
4656       // TODO: We probably need to verify synchronization is not an issue, e.g.,
4657       //       there is no race by not copying a constant byval.
4658       const auto &MemAA = A.getAAFor<AAMemoryBehavior>(*this, getIRPosition());
4659       if (!MemAA.isAssumedReadOnly())
4660         return indicatePessimisticFixpoint();
4661     }
4662 
4663     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4664 
4665     auto PredForCallSite = [&](AbstractCallSite ACS) {
4666       const IRPosition &ACSArgPos =
4667           IRPosition::callsite_argument(ACS, getCallSiteArgNo());
4668       // Check if a corresponding argument was found or if it is not
4669       // associated (which can happen for callback calls).
4670       if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
4671         return false;
4672 
4673       // We can only propagate thread independent values through callbacks.
4674       // This is different to direct/indirect call sites because for them we
4675       // know the thread executing the caller and callee is the same. For
4676       // callbacks this is not guaranteed, thus a thread dependent value could
4677       // be different for the caller and callee, making it invalid to propagate.
4678       Value &ArgOp = ACSArgPos.getAssociatedValue();
4679       if (ACS.isCallbackCall())
4680         if (auto *C = dyn_cast<Constant>(&ArgOp))
4681           if (C->isThreadDependent())
4682             return false;
4683       return checkAndUpdate(A, *this, ArgOp, SimplifiedAssociatedValue);
4684     };
4685 
4686     bool AllCallSitesKnown;
4687     if (!A.checkForAllCallSites(PredForCallSite, *this, true,
4688                                 AllCallSitesKnown))
4689       if (!askSimplifiedValueForOtherAAs(A))
4690         return indicatePessimisticFixpoint();
4691 
4692     // If a candidate was found in this update, return CHANGED.
4693     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4694                ? ChangeStatus::UNCHANGED
4695                : ChangeStatus::CHANGED;
4696   }
4697 
4698   /// See AbstractAttribute::trackStatistics()
4699   void trackStatistics() const override {
4700     STATS_DECLTRACK_ARG_ATTR(value_simplify)
4701   }
4702 };
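// For illustration, if every call site of a hypothetical callee passes the
// same constant,
//
//   call void @f(i32 42)
//   call void @f(i32 42)
//
// then checkForAllCallSites unifies the incoming values to `i32 42`, and the
// argument inside @f can be replaced by that constant when manifested.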
4703 
4704 struct AAValueSimplifyReturned : AAValueSimplifyImpl {
4705   AAValueSimplifyReturned(const IRPosition &IRP, Attributor &A)
4706       : AAValueSimplifyImpl(IRP, A) {}
4707 
4708   /// See AbstractAttribute::updateImpl(...).
4709   ChangeStatus updateImpl(Attributor &A) override {
4710     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4711 
4712     auto PredForReturned = [&](Value &V) {
4713       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4714     };
4715 
4716     if (!A.checkForAllReturnedValues(PredForReturned, *this))
4717       if (!askSimplifiedValueForOtherAAs(A))
4718         return indicatePessimisticFixpoint();
4719 
4720     // If a candidate was found in this update, return CHANGED.
4721     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4722                ? ChangeStatus::UNCHANGED
4723                : ChangeStatus::CHANGED;
4724   }
4725 
4726   ChangeStatus manifest(Attributor &A) override {
4727     ChangeStatus Changed = ChangeStatus::UNCHANGED;
4728 
4729     if (SimplifiedAssociatedValue.hasValue() &&
4730         !SimplifiedAssociatedValue.getValue())
4731       return Changed;
4732 
4733     Value &V = getAssociatedValue();
4734     auto *C = SimplifiedAssociatedValue.hasValue()
4735                   ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
4736                   : UndefValue::get(V.getType());
4737     if (C) {
4738       auto PredForReturned =
4739           [&](Value &V, const SmallSetVector<ReturnInst *, 4> &RetInsts) {
4740             // We can replace the AssociatedValue with the constant.
4741             if (&V == C || V.getType() != C->getType() || isa<UndefValue>(V))
4742               return true;
4743 
4744             for (ReturnInst *RI : RetInsts) {
4745               if (RI->getFunction() != getAnchorScope())
4746                 continue;
4747               auto *RC = C;
4748               if (RC->getType() != RI->getReturnValue()->getType())
4749                 RC = ConstantExpr::getBitCast(RC,
4750                                               RI->getReturnValue()->getType());
4751               LLVM_DEBUG(dbgs() << "[ValueSimplify] " << V << " -> " << *RC
4752                                 << " in " << *RI << " :: " << *this << "\n");
4753               if (A.changeUseAfterManifest(RI->getOperandUse(0), *RC))
4754                 Changed = ChangeStatus::CHANGED;
4755             }
4756             return true;
4757           };
4758       A.checkForAllReturnedValuesAndReturnInsts(PredForReturned, *this);
4759     }
4760 
4761     return Changed | AAValueSimplify::manifest(A);
4762   }
4763 
4764   /// See AbstractAttribute::trackStatistics()
4765   void trackStatistics() const override {
4766     STATS_DECLTRACK_FNRET_ATTR(value_simplify)
4767   }
4768 };
4769 
4770 struct AAValueSimplifyFloating : AAValueSimplifyImpl {
4771   AAValueSimplifyFloating(const IRPosition &IRP, Attributor &A)
4772       : AAValueSimplifyImpl(IRP, A) {}
4773 
4774   /// See AbstractAttribute::initialize(...).
4775   void initialize(Attributor &A) override {
4776     // FIXME: This might have exposed a SCC iterator update bug in the old PM.
4777     //        Needs investigation.
4778     // AAValueSimplifyImpl::initialize(A);
4779     Value &V = getAnchorValue();
4780 
4781     // TODO: add other stuff
4782     if (isa<Constant>(V))
4783       indicatePessimisticFixpoint();
4784   }
4785 
4786   /// Check if \p ICmp is an equality comparison (==/!=) with at least one
4787   /// nullptr. If so, try to simplify it using AANonNull on the other operand.
4788   /// Return true if successful, in that case SimplifiedAssociatedValue will be
4789   /// updated and \p Changed is set appropriately.
4790   bool checkForNullPtrCompare(Attributor &A, ICmpInst *ICmp,
4791                               ChangeStatus &Changed) {
4792     if (!ICmp)
4793       return false;
4794     if (!ICmp->isEquality())
4795       return false;
4796 
4797     // This is a comparison with == or !=. We check for nullptr now.
4798     bool Op0IsNull = isa<ConstantPointerNull>(ICmp->getOperand(0));
4799     bool Op1IsNull = isa<ConstantPointerNull>(ICmp->getOperand(1));
4800     if (!Op0IsNull && !Op1IsNull)
4801       return false;
4802 
4803     LLVMContext &Ctx = ICmp->getContext();
4804     // Check for `nullptr ==/!= nullptr` first:
4805     if (Op0IsNull && Op1IsNull) {
4806       Value *NewVal = ConstantInt::get(
4807           Type::getInt1Ty(Ctx), ICmp->getPredicate() == CmpInst::ICMP_EQ);
4808       assert(!SimplifiedAssociatedValue.hasValue() &&
4809              "Did not expect non-fixed value for constant comparison");
4810       SimplifiedAssociatedValue = NewVal;
4811       indicateOptimisticFixpoint();
4812       Changed = ChangeStatus::CHANGED;
4813       return true;
4814     }
4815 
4816     // What is left is the nullptr ==/!= non-nullptr case. We'll use AANonNull
4817     // on the non-nullptr operand, and if we assume it's non-null we can
4818     // conclude the result of the comparison.
4819     assert((Op0IsNull || Op1IsNull) &&
4820            "Expected nullptr versus non-nullptr comparison at this point");
4821 
4822     // The index is the operand that we assume is not null.
4823     unsigned PtrIdx = Op0IsNull;
4824     auto &PtrNonNullAA = A.getAAFor<AANonNull>(
4825         *this, IRPosition::value(*ICmp->getOperand(PtrIdx)));
4826     if (!PtrNonNullAA.isAssumedNonNull())
4827       return false;
4828 
4829     // The new value depends on the predicate, true for != and false for ==.
4830     Value *NewVal = ConstantInt::get(Type::getInt1Ty(Ctx),
4831                                      ICmp->getPredicate() == CmpInst::ICMP_NE);
4832 
4833     assert((!SimplifiedAssociatedValue.hasValue() ||
4834             SimplifiedAssociatedValue == NewVal) &&
4835            "Did not expect to change value for zero-comparison");
4836 
4837     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4838     SimplifiedAssociatedValue = NewVal;
4839 
4840     if (PtrNonNullAA.isKnownNonNull())
4841       indicateOptimisticFixpoint();
4842 
4843     Changed = HasValueBefore ? ChangeStatus::UNCHANGED : ChangeStatus::CHANGED;
4844     return true;
4845   }
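  // Concretely, for a hypothetical comparison
  //
  //   %c = icmp eq i8* %p, null
  //
  // where AANonNull assumes %p to be non-null, %c simplifies to `i1 false`
  // (an `icmp ne` would simplify to `i1 true`), and the simplification is
  // fixed optimistically once the non-nullness is known rather than assumed.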
4846 
4847   /// See AbstractAttribute::updateImpl(...).
4848   ChangeStatus updateImpl(Attributor &A) override {
4849     bool HasValueBefore = SimplifiedAssociatedValue.hasValue();
4850 
4851     ChangeStatus Changed;
4852     if (checkForNullPtrCompare(A, dyn_cast<ICmpInst>(&getAnchorValue()),
4853                                Changed))
4854       return Changed;
4855 
4856     auto VisitValueCB = [&](Value &V, const Instruction *CtxI, bool &,
4857                             bool Stripped) -> bool {
4858       auto &AA = A.getAAFor<AAValueSimplify>(*this, IRPosition::value(V));
4859       if (!Stripped && this == &AA) {
4860         // TODO: Look the instruction and check recursively.
4861 
4862         LLVM_DEBUG(dbgs() << "[ValueSimplify] Can't be stripped more : " << V
4863                           << "\n");
4864         return false;
4865       }
4866       return checkAndUpdate(A, *this, V, SimplifiedAssociatedValue);
4867     };
4868 
4869     bool Dummy = false;
4870     if (!genericValueTraversal<AAValueSimplify, bool>(
4871             A, getIRPosition(), *this, Dummy, VisitValueCB, getCtxI(),
4872             /* UseValueSimplify */ false))
4873       if (!askSimplifiedValueForOtherAAs(A))
4874         return indicatePessimisticFixpoint();
4875 
4876     // If a candidate was found in this update, return CHANGED.
4877 
4878     return HasValueBefore == SimplifiedAssociatedValue.hasValue()
4879                ? ChangeStatus::UNCHANGED
4880                : ChangeStatus::CHANGED;
4881   }
4882 
4883   /// See AbstractAttribute::trackStatistics()
4884   void trackStatistics() const override {
4885     STATS_DECLTRACK_FLOATING_ATTR(value_simplify)
4886   }
4887 };
4888 
4889 struct AAValueSimplifyFunction : AAValueSimplifyImpl {
4890   AAValueSimplifyFunction(const IRPosition &IRP, Attributor &A)
4891       : AAValueSimplifyImpl(IRP, A) {}
4892 
4893   /// See AbstractAttribute::initialize(...).
4894   void initialize(Attributor &A) override {
4895     SimplifiedAssociatedValue = &getAnchorValue();
4896     indicateOptimisticFixpoint();
4897   }
4898   /// See AbstractAttribute::updateImpl(...).
4899   ChangeStatus updateImpl(Attributor &A) override {
4900     llvm_unreachable(
4901         "AAValueSimplify(Function|CallSite)::updateImpl will not be called");
4902   }
4903   /// See AbstractAttribute::trackStatistics()
4904   void trackStatistics() const override {
4905     STATS_DECLTRACK_FN_ATTR(value_simplify)
4906   }
4907 };
4908 
4909 struct AAValueSimplifyCallSite : AAValueSimplifyFunction {
AAValueSimplifyCallSite__anon0ce335530111::AAValueSimplifyCallSite4910   AAValueSimplifyCallSite(const IRPosition &IRP, Attributor &A)
4911       : AAValueSimplifyFunction(IRP, A) {}
4912   /// See AbstractAttribute::trackStatistics()
trackStatistics__anon0ce335530111::AAValueSimplifyCallSite4913   void trackStatistics() const override {
4914     STATS_DECLTRACK_CS_ATTR(value_simplify)
4915   }
4916 };
4917 
4918 struct AAValueSimplifyCallSiteReturned : AAValueSimplifyReturned {
AAValueSimplifyCallSiteReturned__anon0ce335530111::AAValueSimplifyCallSiteReturned4919   AAValueSimplifyCallSiteReturned(const IRPosition &IRP, Attributor &A)
4920       : AAValueSimplifyReturned(IRP, A) {}
4921 
4922   /// See AbstractAttribute::manifest(...).
manifest__anon0ce335530111::AAValueSimplifyCallSiteReturned4923   ChangeStatus manifest(Attributor &A) override {
4924     return AAValueSimplifyImpl::manifest(A);
4925   }
4926 
trackStatistics__anon0ce335530111::AAValueSimplifyCallSiteReturned4927   void trackStatistics() const override {
4928     STATS_DECLTRACK_CSRET_ATTR(value_simplify)
4929   }
4930 };
struct AAValueSimplifyCallSiteArgument : AAValueSimplifyFloating {
  AAValueSimplifyCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueSimplifyFloating(IRP, A) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    if (SimplifiedAssociatedValue.hasValue() &&
        !SimplifiedAssociatedValue.getValue())
      return Changed;

    Value &V = getAssociatedValue();
    auto *C = SimplifiedAssociatedValue.hasValue()
                  ? dyn_cast<Constant>(SimplifiedAssociatedValue.getValue())
                  : UndefValue::get(V.getType());
    if (C) {
      Use &U = cast<CallBase>(&getAnchorValue())
                   ->getArgOperandUse(getCallSiteArgNo());
      // We can replace the AssociatedValue with the constant.
      if (&V != C && V.getType() == C->getType()) {
        if (A.changeUseAfterManifest(U, *C))
          Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed | AAValueSimplify::manifest(A);
  }

  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_simplify)
  }
};

/// ----------------------- Heap-To-Stack Conversion ---------------------------
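// For illustration (hypothetical IR): an allocation with a small constant
// size whose free calls can all be erased, e.g.,
//   %p = call noalias i8* @malloc(i64 8)
//   ...
//   call void @free(i8* %p)
// is rewritten by the manifest below into a stack slot,
//   %p = alloca i8, i64 8
// with the matching free call(s) deleted.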
struct AAHeapToStackImpl : public AAHeapToStack {
  AAHeapToStackImpl(const IRPosition &IRP, Attributor &A)
      : AAHeapToStack(IRP, A) {}

  const std::string getAsStr() const override {
    return "[H2S] Mallocs: " + std::to_string(MallocCalls.size());
  }

  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function *F = getAnchorScope();
    const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

    for (Instruction *MallocCall : MallocCalls) {
      // This malloc cannot be replaced.
      if (BadMallocCalls.count(MallocCall))
        continue;

      for (Instruction *FreeCall : FreesForMalloc[MallocCall]) {
        LLVM_DEBUG(dbgs() << "H2S: Removing free call: " << *FreeCall << "\n");
        A.deleteAfterManifest(*FreeCall);
        HasChanged = ChangeStatus::CHANGED;
      }

      LLVM_DEBUG(dbgs() << "H2S: Removing malloc call: " << *MallocCall
                        << "\n");

      Align Alignment;
      Constant *Size;
      if (isCallocLikeFn(MallocCall, TLI)) {
        auto *Num = cast<ConstantInt>(MallocCall->getOperand(0));
        auto *SizeT = cast<ConstantInt>(MallocCall->getOperand(1));
        APInt TotalSize = SizeT->getValue() * Num->getValue();
        Size =
            ConstantInt::get(MallocCall->getOperand(0)->getType(), TotalSize);
      } else if (isAlignedAllocLikeFn(MallocCall, TLI)) {
        Size = cast<ConstantInt>(MallocCall->getOperand(1));
        Alignment = MaybeAlign(cast<ConstantInt>(MallocCall->getOperand(0))
                                   ->getValue()
                                   .getZExtValue())
                        .valueOrOne();
      } else {
        Size = cast<ConstantInt>(MallocCall->getOperand(0));
      }

      unsigned AS = cast<PointerType>(MallocCall->getType())->getAddressSpace();
      Instruction *AI =
          new AllocaInst(Type::getInt8Ty(F->getContext()), AS, Size, Alignment,
                         "", MallocCall->getNextNode());

      if (AI->getType() != MallocCall->getType())
        AI = new BitCastInst(AI, MallocCall->getType(), "malloc_bc",
                             AI->getNextNode());

      A.changeValueAfterManifest(*MallocCall, *AI);

      if (auto *II = dyn_cast<InvokeInst>(MallocCall)) {
        auto *NBB = II->getNormalDest();
        BranchInst::Create(NBB, MallocCall->getParent());
        A.deleteAfterManifest(*MallocCall);
      } else {
        A.deleteAfterManifest(*MallocCall);
      }

      // Zero out the allocated memory if it was a calloc.
      if (isCallocLikeFn(MallocCall, TLI)) {
        auto *BI = new BitCastInst(AI, MallocCall->getType(), "calloc_bc",
                                   AI->getNextNode());
        Value *Ops[] = {
            BI, ConstantInt::get(F->getContext(), APInt(8, 0, false)), Size,
            ConstantInt::get(Type::getInt1Ty(F->getContext()), false)};

        Type *Tys[] = {BI->getType(), MallocCall->getOperand(0)->getType()};
        Module *M = F->getParent();
        Function *Fn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
        CallInst::Create(Fn, Ops, "", BI->getNextNode());
      }
      HasChanged = ChangeStatus::CHANGED;
    }

    return HasChanged;
  }

  /// Collection of all malloc calls in a function.
  SmallSetVector<Instruction *, 4> MallocCalls;

  /// Collection of malloc calls that cannot be converted.
  DenseSet<const Instruction *> BadMallocCalls;

  /// A map for each malloc call to the set of associated free calls.
  DenseMap<Instruction *, SmallPtrSet<Instruction *, 4>> FreesForMalloc;

  ChangeStatus updateImpl(Attributor &A) override;
};

ChangeStatus AAHeapToStackImpl::updateImpl(Attributor &A) {
  const Function *F = getAnchorScope();
  const auto *TLI = A.getInfoCache().getTargetLibraryInfoForFunction(*F);

  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();

  auto FreeCheck = [&](Instruction &I) {
    const auto &Frees = FreesForMalloc.lookup(&I);
    if (Frees.size() != 1)
      return false;
    Instruction *UniqueFree = *Frees.begin();
    return Explorer.findInContextOf(UniqueFree, I.getNextNode());
  };
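
  // Note: FreeCheck above only accepts an allocation whose unique free call
  // must be executed whenever the allocation is, e.g. (hypothetical IR):
  //   %p = call i8* @malloc(i64 8)
  //   ...straight-line code, no early exits...
  //   call void @free(i8* %p)
  // A free that sits behind a conditional branch would not qualify.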

  auto UsesCheck = [&](Instruction &I) {
    bool ValidUsesOnly = true;
    bool MustUse = true;
    auto Pred = [&](const Use &U, bool &Follow) -> bool {
      Instruction *UserI = cast<Instruction>(U.getUser());
      if (isa<LoadInst>(UserI))
        return true;
      if (auto *SI = dyn_cast<StoreInst>(UserI)) {
        if (SI->getValueOperand() == U.get()) {
          LLVM_DEBUG(dbgs()
                     << "[H2S] escaping store to memory: " << *UserI << "\n");
          ValidUsesOnly = false;
        } else {
          // A store into the malloc'ed memory is fine.
        }
        return true;
      }
      if (auto *CB = dyn_cast<CallBase>(UserI)) {
        if (!CB->isArgOperand(&U) || CB->isLifetimeStartOrEnd())
          return true;
        // Record the free call, if this user is one.
        if (isFreeCall(UserI, TLI)) {
          if (MustUse) {
            FreesForMalloc[&I].insert(UserI);
          } else {
            LLVM_DEBUG(dbgs() << "[H2S] free potentially on different mallocs: "
                              << *UserI << "\n");
            ValidUsesOnly = false;
          }
          return true;
        }

        unsigned ArgNo = CB->getArgOperandNo(&U);

        const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));

        // If a callsite argument use is nofree, we are fine.
        const auto &ArgNoFreeAA = A.getAAFor<AANoFree>(
            *this, IRPosition::callsite_argument(*CB, ArgNo));

        if (!NoCaptureAA.isAssumedNoCapture() ||
            !ArgNoFreeAA.isAssumedNoFree()) {
          LLVM_DEBUG(dbgs() << "[H2S] Bad user: " << *UserI << "\n");
          ValidUsesOnly = false;
        }
        return true;
      }

      if (isa<GetElementPtrInst>(UserI) || isa<BitCastInst>(UserI) ||
          isa<PHINode>(UserI) || isa<SelectInst>(UserI)) {
        MustUse &= !(isa<PHINode>(UserI) || isa<SelectInst>(UserI));
        Follow = true;
        return true;
      }
      // Unknown user for which we cannot track uses further (in a way that
      // makes sense).
      LLVM_DEBUG(dbgs() << "[H2S] Unknown user: " << *UserI << "\n");
      ValidUsesOnly = false;
      return true;
    };
    A.checkForAllUses(Pred, *this, I);
    return ValidUsesOnly;
  };

  auto MallocCallocCheck = [&](Instruction &I) {
    if (BadMallocCalls.count(&I))
      return true;

    bool IsMalloc = isMallocLikeFn(&I, TLI);
    bool IsAlignedAllocLike = isAlignedAllocLikeFn(&I, TLI);
    bool IsCalloc = !IsMalloc && isCallocLikeFn(&I, TLI);
    if (!IsMalloc && !IsAlignedAllocLike && !IsCalloc) {
      BadMallocCalls.insert(&I);
      return true;
    }

    if (IsMalloc) {
      if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(0)))
        if (Size->getValue().ule(MaxHeapToStackSize))
          if (UsesCheck(I) || FreeCheck(I)) {
            MallocCalls.insert(&I);
            return true;
          }
    } else if (IsAlignedAllocLike && isa<ConstantInt>(I.getOperand(0))) {
      // Only if the alignment and sizes are constant.
      if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
        if (Size->getValue().ule(MaxHeapToStackSize))
          if (UsesCheck(I) || FreeCheck(I)) {
            MallocCalls.insert(&I);
            return true;
          }
    } else if (IsCalloc) {
      bool Overflow = false;
      if (auto *Num = dyn_cast<ConstantInt>(I.getOperand(0)))
        if (auto *Size = dyn_cast<ConstantInt>(I.getOperand(1)))
          if ((Size->getValue().umul_ov(Num->getValue(), Overflow))
                  .ule(MaxHeapToStackSize))
            if (!Overflow && (UsesCheck(I) || FreeCheck(I))) {
              MallocCalls.insert(&I);
              return true;
            }
    }

    BadMallocCalls.insert(&I);
    return true;
  };

  size_t NumBadMallocs = BadMallocCalls.size();

  A.checkForAllCallLikeInstructions(MallocCallocCheck, *this);

  if (NumBadMallocs != BadMallocCalls.size())
    return ChangeStatus::CHANGED;

  return ChangeStatus::UNCHANGED;
}
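
// Note on the size limit: MallocCallocCheck above only accepts allocations
// whose constant size is at most MaxHeapToStackSize (a command-line
// controlled threshold), so e.g. a hypothetical `malloc(1 << 20)` is left
// untouched under the default setting.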

struct AAHeapToStackFunction final : public AAHeapToStackImpl {
  AAHeapToStackFunction(const IRPosition &IRP, Attributor &A)
      : AAHeapToStackImpl(IRP, A) {}

  /// See AbstractAttribute::trackStatistics().
  void trackStatistics() const override {
    STATS_DECL(
        MallocCalls, Function,
        "Number of malloc/calloc/aligned_alloc calls converted to allocas");
    for (auto *C : MallocCalls)
      if (!BadMallocCalls.count(C))
        ++BUILD_STAT_NAME(MallocCalls, Function);
  }
};

/// ----------------------- Privatizable Pointers ------------------------------
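
// For illustration (hypothetical IR): privatizing the pointer argument of
//   define void @fn(i32* %p)        ; only reads *%p
// rewrites the signature to pass the pointee by value,
//   define void @fn(i32 %p.val)
// with the callee re-materializing %p as a local alloca and call sites
// loading and passing the value directly.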
struct AAPrivatizablePtrImpl : public AAPrivatizablePtr {
  AAPrivatizablePtrImpl(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtr(IRP, A), PrivatizableType(llvm::None) {}

  ChangeStatus indicatePessimisticFixpoint() override {
    AAPrivatizablePtr::indicatePessimisticFixpoint();
    PrivatizableType = nullptr;
    return ChangeStatus::CHANGED;
  }

  /// Identify the type we can choose for a private copy of the underlying
  /// argument. None means it is not clear yet, nullptr means there is none.
  virtual Optional<Type *> identifyPrivatizableType(Attributor &A) = 0;

  /// Return a privatizable type that encloses both T0 and T1.
  /// TODO: This is merely a stub for now as we should manage a mapping as well.
  Optional<Type *> combineTypes(Optional<Type *> T0, Optional<Type *> T1) {
    if (!T0.hasValue())
      return T1;
    if (!T1.hasValue())
      return T0;
    if (T0 == T1)
      return T0;
    return nullptr;
  }

  Optional<Type *> getPrivatizableType() const override {
    return PrivatizableType;
  }

  const std::string getAsStr() const override {
    return isAssumedPrivatizablePtr() ? "[priv]" : "[no-priv]";
  }

protected:
  Optional<Type *> PrivatizableType;
};

// TODO: Do this for call site arguments (probably also other values) as well.

struct AAPrivatizablePtrArgument final : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    // If this is a byval argument and we know all the call sites (so we can
    // rewrite them), there is no need to check them explicitly.
    bool AllCallSitesKnown;
    if (getIRPosition().hasAttr(Attribute::ByVal) &&
        A.checkForAllCallSites([](AbstractCallSite ACS) { return true; }, *this,
                               true, AllCallSitesKnown))
      return getAssociatedValue().getType()->getPointerElementType();

    Optional<Type *> Ty;
    unsigned ArgNo = getIRPosition().getCallSiteArgNo();

    // Make sure the associated call site argument has the same type at all call
    // sites and it is an allocation we know is safe to privatize, for now that
    // means we only allow alloca instructions.
    // TODO: We can additionally analyze the accesses in the callee to create
    //       the type from that information instead. That is a little more
    //       involved and will be done in a follow up patch.
    auto CallSiteCheck = [&](AbstractCallSite ACS) {
      IRPosition ACSArgPos = IRPosition::callsite_argument(ACS, ArgNo);
      // Check if a corresponding argument was found or if it is one not
      // associated (which can happen for callback calls).
      if (ACSArgPos.getPositionKind() == IRPosition::IRP_INVALID)
        return false;

      // Check that all call sites agree on a type.
      auto &PrivCSArgAA = A.getAAFor<AAPrivatizablePtr>(*this, ACSArgPos);
      Optional<Type *> CSTy = PrivCSArgAA.getPrivatizableType();

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] ACSPos: " << ACSArgPos << ", CSTy: ";
        if (CSTy.hasValue() && CSTy.getValue())
          CSTy.getValue()->print(dbgs());
        else if (CSTy.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
      });

      Ty = combineTypes(Ty, CSTy);

      LLVM_DEBUG({
        dbgs() << " : New Type: ";
        if (Ty.hasValue() && Ty.getValue())
          Ty.getValue()->print(dbgs());
        else if (Ty.hasValue())
          dbgs() << "<nullptr>";
        else
          dbgs() << "<none>";
        dbgs() << "\n";
      });

      return !Ty.hasValue() || Ty.getValue();
    };

    if (!A.checkForAllCallSites(CallSiteCheck, *this, true, AllCallSitesKnown))
      return nullptr;
    return Ty;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    // The dependence is optional so we don't give up once we give up on the
    // alignment.
    A.getAAFor<AAAlign>(*this, IRPosition::value(getAssociatedValue()),
                        /* TrackDependence */ true, DepClassTy::OPTIONAL);

    // Avoid arguments with padding for now.
    if (!getIRPosition().hasAttr(Attribute::ByVal) &&
        !ArgumentPromotionPass::isDenselyPacked(PrivatizableType.getValue(),
                                                A.getInfoCache().getDL())) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Padding detected\n");
      return indicatePessimisticFixpoint();
    }

    // Verify callee and caller agree on how the promoted argument would be
    // passed.
    // TODO: The use of the ArgumentPromotion interface here is ugly, we need a
    // specialized form of TargetTransformInfo::areFunctionArgsABICompatible
    // which doesn't require the arguments ArgumentPromotion wanted to pass.
    Function &Fn = *getIRPosition().getAnchorScope();
    SmallPtrSet<Argument *, 1> ArgsToPromote, Dummy;
    ArgsToPromote.insert(getAssociatedArgument());
    const auto *TTI =
        A.getInfoCache().getAnalysisResultForFunction<TargetIRAnalysis>(Fn);
    if (!TTI ||
        !ArgumentPromotionPass::areFunctionArgsABICompatible(
            Fn, *TTI, ArgsToPromote, Dummy) ||
        ArgsToPromote.empty()) {
      LLVM_DEBUG(
          dbgs() << "[AAPrivatizablePtr] ABI incompatibility detected for "
                 << Fn.getName() << "\n");
      return indicatePessimisticFixpoint();
    }

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    Argument *Arg = getAssociatedArgument();
    if (!A.isValidFunctionSignatureRewrite(*Arg, ReplacementTypes)) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Rewrite not valid\n");
      return indicatePessimisticFixpoint();
    }

    unsigned ArgNo = Arg->getArgNo();

    // Helper to check if for the given call site the associated argument is
    // passed to a callback where the privatization would be different.
    auto IsCompatiblePrivArgOfCallback = [&](CallBase &CB) {
      SmallVector<const Use *, 4> CallbackUses;
      AbstractCallSite::getCallbackUses(CB, CallbackUses);
      for (const Use *U : CallbackUses) {
        AbstractCallSite CBACS(U);
        assert(CBACS && CBACS.isCallbackCall());
        for (Argument &CBArg : CBACS.getCalledFunction()->args()) {
          int CBArgNo = CBACS.getCallArgOperandNo(CBArg);

          LLVM_DEBUG({
            dbgs()
                << "[AAPrivatizablePtr] Argument " << *Arg
                << " check if it can be privatized in the context of its "
                   "parent ("
                << Arg->getParent()->getName()
                << ")\n[AAPrivatizablePtr] because it is an argument in a "
                   "callback ("
                << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                << ")\n[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperand(CBArg) << " vs "
                << CB.getArgOperand(ArgNo) << "\n"
                << "[AAPrivatizablePtr] " << CBArg << " : "
                << CBACS.getCallArgOperandNo(CBArg) << " vs " << ArgNo << "\n";
          });

          if (CBArgNo != int(ArgNo))
            continue;
          const auto &CBArgPrivAA =
              A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(CBArg));
          if (CBArgPrivAA.isValidState()) {
            auto CBArgPrivTy = CBArgPrivAA.getPrivatizableType();
            if (!CBArgPrivTy.hasValue())
              continue;
            if (CBArgPrivTy.getValue() == PrivatizableType)
              continue;
          }

          LLVM_DEBUG({
            dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
                   << " cannot be privatized in the context of its parent ("
                   << Arg->getParent()->getName()
                   << ")\n[AAPrivatizablePtr] because it is an argument in a "
                      "callback ("
                   << CBArgNo << "@" << CBACS.getCalledFunction()->getName()
                   << ").\n[AAPrivatizablePtr] for which the argument "
                      "privatization is not compatible.\n";
          });
          return false;
        }
      }
      return true;
    };

    // Helper to check if for the given call site the associated argument is
    // passed to a direct call where the privatization would be different.
    auto IsCompatiblePrivArgOfDirectCS = [&](AbstractCallSite ACS) {
      CallBase *DC = cast<CallBase>(ACS.getInstruction());
      int DCArgNo = ACS.getCallArgOperandNo(ArgNo);
      assert(DCArgNo >= 0 && unsigned(DCArgNo) < DC->getNumArgOperands() &&
             "Expected a direct call operand for callback call operand");

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " check if it can be privatized in the context of its "
                  "parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << DCArgNo << "@" << DC->getCalledFunction()->getName()
               << ").\n";
      });

      Function *DCCallee = DC->getCalledFunction();
      if (unsigned(DCArgNo) < DCCallee->arg_size()) {
        const auto &DCArgPrivAA = A.getAAFor<AAPrivatizablePtr>(
            *this, IRPosition::argument(*DCCallee->getArg(DCArgNo)));
        if (DCArgPrivAA.isValidState()) {
          auto DCArgPrivTy = DCArgPrivAA.getPrivatizableType();
          if (!DCArgPrivTy.hasValue())
            return true;
          if (DCArgPrivTy.getValue() == PrivatizableType)
            return true;
        }
      }

      LLVM_DEBUG({
        dbgs() << "[AAPrivatizablePtr] Argument " << *Arg
               << " cannot be privatized in the context of its parent ("
               << Arg->getParent()->getName()
               << ")\n[AAPrivatizablePtr] because it is an argument in a "
                  "direct call of ("
               << ACS.getInstruction()->getCalledFunction()->getName()
               << ").\n[AAPrivatizablePtr] for which the argument "
                  "privatization is not compatible.\n";
      });
      return false;
    };

    // Helper to check if the associated argument is used at the given abstract
    // call site in a way that is incompatible with the privatization assumed
    // here.
    auto IsCompatiblePrivArgOfOtherCallSite = [&](AbstractCallSite ACS) {
      if (ACS.isDirectCall())
        return IsCompatiblePrivArgOfCallback(*ACS.getInstruction());
      if (ACS.isCallbackCall())
        return IsCompatiblePrivArgOfDirectCS(ACS);
      return false;
    };

    bool AllCallSitesKnown;
    if (!A.checkForAllCallSites(IsCompatiblePrivArgOfOtherCallSite, *this, true,
                                AllCallSitesKnown))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// Given a type to privatize, \p PrivType, collect the constituent types
  /// (which are used) in \p ReplacementTypes.
  static void
  identifyReplacementTypes(Type *PrivType,
                           SmallVectorImpl<Type *> &ReplacementTypes) {
    // TODO: For now we expand the privatization type to the fullest which can
    //       lead to dead arguments that need to be removed later.
    assert(PrivType && "Expected privatizable type!");

    // Traverse the type, extract constituent types on the outermost level.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++)
        ReplacementTypes.push_back(PrivStructType->getElementType(u));
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      ReplacementTypes.append(PrivArrayType->getNumElements(),
                              PrivArrayType->getElementType());
    } else {
      ReplacementTypes.push_back(PrivType);
    }
  }
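
  // For illustration (hypothetical types): { i32, float } yields the
  // replacement list [i32, float], [4 x i8] yields [i8, i8, i8, i8], and a
  // plain i64 stays a single-element list [i64].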

  /// Initialize \p Base according to the type \p PrivType at position \p IP.
  /// The values needed are taken from the arguments of \p F starting at
  /// position \p ArgNo.
  static void createInitialization(Type *PrivType, Value &Base, Function &F,
                                   unsigned ArgNo, Instruction &IP) {
    assert(PrivType && "Expected privatizable type!");

    IRBuilder<NoFolder> IRB(&IP);
    const DataLayout &DL = F.getParent()->getDataLayout();

    // Traverse the type, build GEPs and stores.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
        Type *PointeeTy = PrivStructType->getElementType(u)->getPointerTo();
        Value *Ptr = constructPointer(
            PointeeTy, &Base, PrivStructLayout->getElementOffset(u), IRB, DL);
        new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
      }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      Type *PointeePtrTy = PointeeTy->getPointerTo();
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
      for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
        Value *Ptr =
            constructPointer(PointeePtrTy, &Base, u * PointeeTySize, IRB, DL);
        new StoreInst(F.getArg(ArgNo + u), Ptr, &IP);
      }
    } else {
      new StoreInst(F.getArg(ArgNo), &Base, &IP);
    }
  }
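
  // For illustration (hypothetical): with PrivType = { i32, float } and a
  // fresh alloca %priv as Base, this emits pointers at the struct's element
  // offsets (0 and 4 under a typical DataLayout) and stores the two incoming
  // replacement arguments through them.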

  /// Extract values from \p Base according to the type \p PrivType at the
  /// call position \p ACS. The values are appended to \p ReplacementValues.
  void createReplacementValues(Align Alignment, Type *PrivType,
                               AbstractCallSite ACS, Value *Base,
                               SmallVectorImpl<Value *> &ReplacementValues) {
    assert(Base && "Expected base value!");
    assert(PrivType && "Expected privatizable type!");
    Instruction *IP = ACS.getInstruction();

    IRBuilder<NoFolder> IRB(IP);
    const DataLayout &DL = IP->getModule()->getDataLayout();

    if (Base->getType()->getPointerElementType() != PrivType)
      Base = BitCastInst::CreateBitOrPointerCast(Base, PrivType->getPointerTo(),
                                                 "", ACS.getInstruction());

    // Traverse the type, build GEPs and loads.
    if (auto *PrivStructType = dyn_cast<StructType>(PrivType)) {
      const StructLayout *PrivStructLayout = DL.getStructLayout(PrivStructType);
      for (unsigned u = 0, e = PrivStructType->getNumElements(); u < e; u++) {
        Type *PointeeTy = PrivStructType->getElementType(u);
        Value *Ptr =
            constructPointer(PointeeTy->getPointerTo(), Base,
                             PrivStructLayout->getElementOffset(u), IRB, DL);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
        L->setAlignment(Alignment);
        ReplacementValues.push_back(L);
      }
    } else if (auto *PrivArrayType = dyn_cast<ArrayType>(PrivType)) {
      Type *PointeeTy = PrivArrayType->getElementType();
      uint64_t PointeeTySize = DL.getTypeStoreSize(PointeeTy);
      Type *PointeePtrTy = PointeeTy->getPointerTo();
      for (unsigned u = 0, e = PrivArrayType->getNumElements(); u < e; u++) {
        Value *Ptr =
            constructPointer(PointeePtrTy, Base, u * PointeeTySize, IRB, DL);
        LoadInst *L = new LoadInst(PointeeTy, Ptr, "", IP);
        L->setAlignment(Alignment);
        ReplacementValues.push_back(L);
      }
    } else {
      LoadInst *L = new LoadInst(PrivType, Base, "", IP);
      L->setAlignment(Alignment);
      ReplacementValues.push_back(L);
    }
  }

  /// See AbstractAttribute::manifest(...)
  ChangeStatus manifest(Attributor &A) override {
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    assert(PrivatizableType.getValue() && "Expected privatizable type!");

    // Collect all tail calls in the function as we cannot allow new allocas to
    // escape into tail recursion.
    // TODO: Be smarter about new allocas escaping into tail calls.
    SmallVector<CallInst *, 16> TailCalls;
    if (!A.checkForAllInstructions(
            [&](Instruction &I) {
              CallInst &CI = cast<CallInst>(I);
              if (CI.isTailCall())
                TailCalls.push_back(&CI);
              return true;
            },
            *this, {Instruction::Call}))
      return ChangeStatus::UNCHANGED;

    Argument *Arg = getAssociatedArgument();
    // Query AAAlign attribute for alignment of associated argument to
    // determine the best alignment of loads.
    const auto &AlignAA = A.getAAFor<AAAlign>(*this, IRPosition::value(*Arg));

    // Callback to repair the associated function. A new alloca is placed at the
    // beginning and initialized with the values passed through arguments. The
    // new alloca replaces the use of the old pointer argument.
    Attributor::ArgumentReplacementInfo::CalleeRepairCBTy FnRepairCB =
        [=](const Attributor::ArgumentReplacementInfo &ARI,
            Function &ReplacementFn, Function::arg_iterator ArgIt) {
          BasicBlock &EntryBB = ReplacementFn.getEntryBlock();
          Instruction *IP = &*EntryBB.getFirstInsertionPt();
          Instruction *AI = new AllocaInst(PrivatizableType.getValue(), 0,
                                           Arg->getName() + ".priv", IP);
          createInitialization(PrivatizableType.getValue(), *AI, ReplacementFn,
                               ArgIt->getArgNo(), *IP);

          if (AI->getType() != Arg->getType())
            AI =
                BitCastInst::CreateBitOrPointerCast(AI, Arg->getType(), "", IP);
          Arg->replaceAllUsesWith(AI);

          for (CallInst *CI : TailCalls)
            CI->setTailCall(false);
        };

    // Callback to repair a call site of the associated function. The elements
    // of the privatizable type are loaded prior to the call and passed to the
    // new function version.
    Attributor::ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
        [=, &AlignAA](const Attributor::ArgumentReplacementInfo &ARI,
                      AbstractCallSite ACS,
                      SmallVectorImpl<Value *> &NewArgOperands) {
          // When no alignment is specified for the load instruction,
          // natural alignment is assumed.
          createReplacementValues(
              assumeAligned(AlignAA.getAssumedAlign()),
              PrivatizableType.getValue(), ACS,
              ACS.getCallArgOperand(ARI.getReplacedArg().getArgNo()),
              NewArgOperands);
        };

    // Collect the types that will replace the privatizable type in the function
    // signature.
    SmallVector<Type *, 16> ReplacementTypes;
    identifyReplacementTypes(PrivatizableType.getValue(), ReplacementTypes);

    // Register a rewrite of the argument.
    if (A.registerFunctionSignatureRewrite(*Arg, ReplacementTypes,
                                           std::move(FnRepairCB),
                                           std::move(ACSRepairCB)))
      return ChangeStatus::CHANGED;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(privatizable_ptr);
  }
};
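
// For illustration (hypothetical IR): with PrivType = i32, a call site
//   call void @fn(i32* %q)
// is repaired by ACSRepairCB above to load and forward the value,
//   %q.val = load i32, i32* %q
//   call void @fn(i32 %q.val)
// while FnRepairCB gives the callee a fresh alloca initialized from %q.val.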

struct AAPrivatizablePtrFloating : public AAPrivatizablePtrImpl {
  AAPrivatizablePtrFloating(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  virtual void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAPrivatizablePtr(Floating|Returned|CallSiteReturned)::"
                     "updateImpl will not be called");
  }

  /// See AAPrivatizablePtrImpl::identifyPrivatizableType(...)
  Optional<Type *> identifyPrivatizableType(Attributor &A) override {
    Value *Obj = getUnderlyingObject(&getAssociatedValue());
    if (!Obj) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] No underlying object found!\n");
      return nullptr;
    }

    if (auto *AI = dyn_cast<AllocaInst>(Obj))
      if (auto *CI = dyn_cast<ConstantInt>(AI->getArraySize()))
        if (CI->isOne())
          return Obj->getType()->getPointerElementType();
    if (auto *Arg = dyn_cast<Argument>(Obj)) {
      auto &PrivArgAA =
          A.getAAFor<AAPrivatizablePtr>(*this, IRPosition::argument(*Arg));
      if (PrivArgAA.isAssumedPrivatizablePtr())
        return Obj->getType()->getPointerElementType();
    }

    LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] Underlying object neither valid "
                         "alloca nor privatizable argument: "
                      << *Obj << "!\n");
    return nullptr;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrCallSiteArgument final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr(Attribute::ByVal))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    PrivatizableType = identifyPrivatizableType(A);
    if (!PrivatizableType.hasValue())
      return ChangeStatus::UNCHANGED;
    if (!PrivatizableType.getValue())
      return indicatePessimisticFixpoint();

    const IRPosition &IRP = getIRPosition();
    auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, IRP);
    if (!NoCaptureAA.isAssumedNoCapture()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might be captured!\n");
      return indicatePessimisticFixpoint();
    }

    auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, IRP);
    if (!NoAliasAA.isAssumedNoAlias()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer might alias!\n");
      return indicatePessimisticFixpoint();
    }

    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(*this, IRP);
    if (!MemBehaviorAA.isAssumedReadOnly()) {
      LLVM_DEBUG(dbgs() << "[AAPrivatizablePtr] pointer is written!\n");
      return indicatePessimisticFixpoint();
    }

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrCallSiteReturned final
    : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(privatizable_ptr);
  }
};

struct AAPrivatizablePtrReturned final : public AAPrivatizablePtrFloating {
  AAPrivatizablePtrReturned(const IRPosition &IRP, Attributor &A)
      : AAPrivatizablePtrFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: We can privatize more than arguments.
    indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(privatizable_ptr);
  }
};

/// -------------------- Memory Behavior Attributes ----------------------------
/// Includes read-none, read-only, and write-only.
/// ----------------------------------------------------------------------------
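// For illustration (hypothetical IR): a function whose only memory access is
//   %v = load i32, i32* %p
// can be deduced `readonly` (NO_WRITES holds, NO_READS does not), while a
// function that never touches memory at all can be deduced `readnone`.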
struct AAMemoryBehaviorImpl : public AAMemoryBehavior {
  AAMemoryBehaviorImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehavior(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(getIRPosition(), getState());
    AAMemoryBehavior::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_ACCESSES);
        break;
      case Attribute::ReadOnly:
        State.addKnownBits(NO_WRITES);
        break;
      case Attribute::WriteOnly:
        State.addKnownBits(NO_READS);
        break;
      default:
        llvm_unreachable("Unexpected attribute!");
      }
    }

    if (auto *I = dyn_cast<Instruction>(&IRP.getAnchorValue())) {
      if (!I->mayReadFromMemory())
        State.addKnownBits(NO_READS);
      if (!I->mayWriteToMemory())
        State.addKnownBits(NO_WRITES);
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    else if (isAssumedReadOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadOnly));
    else if (isAssumedWriteOnly())
      Attrs.push_back(Attribute::get(Ctx, Attribute::WriteOnly));
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    if (hasAttr(Attribute::ReadNone, /* IgnoreSubsumingPositions */ true))
      return ChangeStatus::UNCHANGED;

    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isAssumedReadNone())
      return "readnone";
    if (isAssumedReadOnly())
      return "readonly";
    if (isAssumedWriteOnly())
      return "writeonly";
    return "may-read/write";
  }

  /// The set of IR attributes AAMemoryBehavior deals with.
  static const Attribute::AttrKind AttrKinds[3];
};

const Attribute::AttrKind AAMemoryBehaviorImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::ReadOnly, Attribute::WriteOnly};

/// Memory behavior attribute for a floating value.
struct AAMemoryBehaviorFloating : AAMemoryBehaviorImpl {
  AAMemoryBehaviorFloating(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    addUsesOf(A, getAssociatedValue());
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FLOATING_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FLOATING_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FLOATING_ATTR(writeonly)
  }

private:
  /// Return true if users of \p UserI might access the underlying
  /// variable/location described by \p U and should therefore be analyzed.
  bool followUsersOfUseIn(Attributor &A, const Use *U,
                          const Instruction *UserI);

  /// Update the state according to the effect of use \p U in \p UserI.
  void analyzeUseIn(Attributor &A, const Use *U, const Instruction *UserI);

protected:
  /// Add the uses of \p V to the `Uses` set we look at during the update step.
  void addUsesOf(Attributor &A, const Value &V);

  /// Container for (transitive) uses of the associated argument.
  SmallVector<const Use *, 8> Uses;

  /// Set to remember the uses we already traversed.
  SmallPtrSet<const Use *, 8> Visited;
};

/// Memory behavior attribute for function argument.
struct AAMemoryBehaviorArgument : AAMemoryBehaviorFloating {
  AAMemoryBehaviorArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    const IRPosition &IRP = getIRPosition();
    // TODO: Make IgnoreSubsumingPositions a property of an IRAttribute so we
    // can query it when we use has/getAttr. That would allow us to reuse the
    // initialize of the base class here.
    bool HasByVal =
        IRP.hasAttr({Attribute::ByVal}, /* IgnoreSubsumingPositions */ true);
    getKnownStateFromValue(IRP, getState(),
                           /* IgnoreSubsumingPositions */ HasByVal);

    // Initialize the use vector with all direct uses of the associated value.
    Argument *Arg = getAssociatedArgument();
    if (!Arg || !A.isFunctionIPOAmendable(*(Arg->getParent()))) {
      indicatePessimisticFixpoint();
    } else {
      addUsesOf(A, *Arg);
    }
  }

  ChangeStatus manifest(Attributor &A) override {
    // TODO: Pointer arguments are not supported on vectors of pointers yet.
    if (!getAssociatedValue().getType()->isPointerTy())
      return ChangeStatus::UNCHANGED;

    // TODO: From readattrs.ll: "inalloca parameters are always
    //                           considered written"
    if (hasAttr({Attribute::InAlloca, Attribute::Preallocated})) {
      removeKnownBits(NO_WRITES);
      removeAssumedBits(NO_WRITES);
    }
    return AAMemoryBehaviorFloating::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_ARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_ARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_ARG_ATTR(writeonly)
  }
};

struct AAMemoryBehaviorCallSiteArgument final : AAMemoryBehaviorArgument {
  AAMemoryBehaviorCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorArgument(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If we don't have an associated attribute this is either a variadic call
    // or an indirect call, either way, nothing to do here.
    Argument *Arg = getAssociatedArgument();
    if (!Arg) {
      indicatePessimisticFixpoint();
      return;
    }
    if (Arg->hasByValAttr()) {
      addKnownBits(NO_WRITES);
      removeKnownBits(NO_READS);
      removeAssumedBits(NO_READS);
    }
    AAMemoryBehaviorArgument::initialize(A);
    if (getAssociatedFunction()->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AAMemoryBehavior>(*this, ArgPos);
    return clampStateAndIndicateChange(getState(), ArgAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CSARG_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CSARG_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CSARG_ATTR(writeonly)
  }
};

/// Memory behavior attribute for a call site return position.
struct AAMemoryBehaviorCallSiteReturned final : AAMemoryBehaviorFloating {
  AAMemoryBehaviorCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    // We do not annotate returned values.
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// An AA to represent the memory behavior function attributes.
struct AAMemoryBehaviorFunction final : public AAMemoryBehaviorImpl {
  AAMemoryBehaviorFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override;

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    Function &F = cast<Function>(getAnchorValue());
    if (isAssumedReadNone()) {
      F.removeFnAttr(Attribute::ArgMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOnly);
      F.removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }
    return AAMemoryBehaviorImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_FN_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_FN_ATTR(writeonly)
  }
};

/// AAMemoryBehavior attribute for call sites.
struct AAMemoryBehaviorCallSite final : AAMemoryBehaviorImpl {
  AAMemoryBehaviorCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryBehaviorImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryBehaviorImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes sense to
    //       specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAMemoryBehavior>(*this, FnPos);
    return clampStateAndIndicateChange(getState(), FnAA.getState());
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
    else if (isAssumedReadOnly())
      STATS_DECLTRACK_CS_ATTR(readonly)
    else if (isAssumedWriteOnly())
      STATS_DECLTRACK_CS_ATTR(writeonly)
  }
};
6125 
ChangeStatus AAMemoryBehaviorFunction::updateImpl(Attributor &A) {

  // The current assumed state used to determine a change.
  auto AssumedState = getAssumed();

  auto CheckRWInst = [&](Instruction &I) {
    // If the instruction has its own memory behavior state, use it to restrict
    // the local state. No further analysis is required as the other memory
    // state is as optimistic as it gets.
    if (const auto *CB = dyn_cast<CallBase>(&I)) {
      const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
          *this, IRPosition::callsite_function(*CB));
      intersectAssumedBits(MemBehaviorAA.getAssumed());
      return !isAtFixpoint();
    }

    // Remove access kind modifiers if necessary.
    if (I.mayReadFromMemory())
      removeAssumedBits(NO_READS);
    if (I.mayWriteToMemory())
      removeAssumedBits(NO_WRITES);
    return !isAtFixpoint();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
    return indicatePessimisticFixpoint();

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}
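
// Illustrative example (hypothetical IR, for exposition): a function whose
// only memory access is a load loses NO_READS but keeps NO_WRITES in
// CheckRWInst above, so it can eventually manifest as `readonly`:
//
//   define i32 @f(i32* %p) {
//     %v = load i32, i32* %p  ; mayReadFromMemory() -> removeAssumedBits(NO_READS)
//     ret i32 %v
//   }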

ChangeStatus AAMemoryBehaviorFloating::updateImpl(Attributor &A) {

  const IRPosition &IRP = getIRPosition();
  const IRPosition &FnPos = IRPosition::function_scope(IRP);
  AAMemoryBehavior::StateType &S = getState();

  // First, check the function scope. We take the known information and we
  // avoid work if the assumed information implies the current assumed
  // information for this attribute. This is valid for all but byval arguments.
  Argument *Arg = IRP.getAssociatedArgument();
  AAMemoryBehavior::base_t FnMemAssumedState =
      AAMemoryBehavior::StateType::getWorstState();
  if (!Arg || !Arg->hasByValAttr()) {
    const auto &FnMemAA = A.getAAFor<AAMemoryBehavior>(
        *this, FnPos, /* TrackDependence */ true, DepClassTy::OPTIONAL);
    FnMemAssumedState = FnMemAA.getAssumed();
    S.addKnownBits(FnMemAA.getKnown());
    if ((S.getAssumed() & FnMemAA.getAssumed()) == S.getAssumed())
      return ChangeStatus::UNCHANGED;
  }

  // Make sure the value is not captured (except through "return"); if it is,
  // any information derived would be irrelevant anyway as we cannot check the
  // potential aliases introduced by the capture. However, there is no need to
  // fall back to anything less optimistic than the function state.
  const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
      *this, IRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);
  if (!ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
    S.intersectAssumedBits(FnMemAssumedState);
    return ChangeStatus::CHANGED;
  }

  // The current assumed state used to determine a change.
  auto AssumedState = S.getAssumed();

  // Liveness information to exclude dead users.
  // TODO: Take the FnPos once we have call site specific liveness information.
  const auto &LivenessAA = A.getAAFor<AAIsDead>(
      *this, IRPosition::function(*IRP.getAssociatedFunction()),
      /* TrackDependence */ false);

  // Visit and expand uses until all are analyzed or a fixpoint is reached.
  for (unsigned i = 0; i < Uses.size() && !isAtFixpoint(); i++) {
    const Use *U = Uses[i];
    Instruction *UserI = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "[AAMemoryBehavior] Use: " << **U << " in " << *UserI
                      << " [Dead: " << (A.isAssumedDead(*U, this, &LivenessAA))
                      << "]\n");
    if (A.isAssumedDead(*U, this, &LivenessAA))
      continue;

    // Droppable users, e.g., llvm::assume, do not actually perform any action.
    if (UserI->isDroppable())
      continue;

    // Check if the users of UserI should also be visited.
    if (followUsersOfUseIn(A, U, UserI))
      addUsesOf(A, *UserI);

    // If UserI might touch memory we analyze the use in detail.
    if (UserI->mayReadOrWriteMemory())
      analyzeUseIn(A, U, UserI);
  }

  return (AssumedState != getAssumed()) ? ChangeStatus::CHANGED
                                        : ChangeStatus::UNCHANGED;
}

void AAMemoryBehaviorFloating::addUsesOf(Attributor &A, const Value &V) {
  SmallVector<const Use *, 8> WL;
  for (const Use &U : V.uses())
    WL.push_back(&U);

  while (!WL.empty()) {
    const Use *U = WL.pop_back_val();
    if (!Visited.insert(U).second)
      continue;

    const Instruction *UserI = cast<Instruction>(U->getUser());
    if (UserI->mayReadOrWriteMemory()) {
      Uses.push_back(U);
      continue;
    }
    if (!followUsersOfUseIn(A, U, UserI))
      continue;
    for (const Use &UU : UserI->uses())
      WL.push_back(&UU);
  }
}

bool AAMemoryBehaviorFloating::followUsersOfUseIn(Attributor &A, const Use *U,
                                                  const Instruction *UserI) {
  // The loaded value is unrelated to the pointer argument, no need to
  // follow the users of the load.
  if (isa<LoadInst>(UserI))
    return false;

  // By default we follow all uses assuming UserI might leak information on U;
  // we have special handling for call site operands though.
  const auto *CB = dyn_cast<CallBase>(UserI);
  if (!CB || !CB->isArgOperand(U))
    return true;

  // If the use is a call argument known not to be captured, the users of
  // the call do not need to be visited because they have to be unrelated to
  // the input. Note that this check is not trivial even though we disallow
  // general capturing of the underlying argument. The reason is that the
  // call might capture the argument "through return", which we allow and for
  // which we need to check call users.
  if (U->get()->getType()->isPointerTy()) {
    unsigned ArgNo = CB->getArgOperandNo(U);
    const auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(
        *this, IRPosition::callsite_argument(*CB, ArgNo),
        /* TrackDependence */ true, DepClassTy::OPTIONAL);
    return !ArgNoCaptureAA.isAssumedNoCapture();
  }

  return true;
}
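
// Illustrative example (hypothetical IR, for exposition): given
//
//   declare i8* @passthrough(i8* returned %p)
//   ...
//   %q = call i8* @passthrough(i8* %arg)
//
// the argument may be captured "through return", so isAssumedNoCapture() is
// false above and the users of %q are visited as well; with a plain
// `nocapture` callee argument they could be skipped.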

void AAMemoryBehaviorFloating::analyzeUseIn(Attributor &A, const Use *U,
                                            const Instruction *UserI) {
  assert(UserI->mayReadOrWriteMemory());

  switch (UserI->getOpcode()) {
  default:
    // TODO: Handle all atomics and other side-effect operations we know of.
    break;
  case Instruction::Load:
    // Loads cause the NO_READS property to disappear.
    removeAssumedBits(NO_READS);
    return;

  case Instruction::Store:
    // Stores cause the NO_WRITES property to disappear if the use is the
    // pointer operand. Note that we do assume that capturing was taken care of
    // somewhere else.
    if (cast<StoreInst>(UserI)->getPointerOperand() == U->get())
      removeAssumedBits(NO_WRITES);
    return;

  case Instruction::Call:
  case Instruction::CallBr:
  case Instruction::Invoke: {
    // For call sites we look at the argument memory behavior attribute (this
    // could be recursive!) in order to restrict our own state.
    const auto *CB = cast<CallBase>(UserI);

    // Give up on operand bundles.
    if (CB->isBundleOperand(U)) {
      indicatePessimisticFixpoint();
      return;
    }

    // Calling a function does read the function pointer, maybe write it if the
    // function is self-modifying.
    if (CB->isCallee(U)) {
      removeAssumedBits(NO_READS);
      break;
    }

    // Adjust the possible access behavior based on the information on the
    // argument.
    IRPosition Pos;
    if (U->get()->getType()->isPointerTy())
      Pos = IRPosition::callsite_argument(*CB, CB->getArgOperandNo(U));
    else
      Pos = IRPosition::callsite_function(*CB);
    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, Pos,
        /* TrackDependence */ true, DepClassTy::OPTIONAL);
    // "assumed" has at most the same bits as the MemBehaviorAA assumed
    // and at least "known".
    intersectAssumedBits(MemBehaviorAA.getAssumed());
    return;
  }
  }

  // Generally, look at the "may-properties" and adjust the assumed state if we
  // did not trigger special handling before.
  if (UserI->mayReadFromMemory())
    removeAssumedBits(NO_READS);
  if (UserI->mayWriteToMemory())
    removeAssumedBits(NO_WRITES);
}
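
// Illustrative example (hypothetical IR, for exposition): the store case
// above only drops NO_WRITES when the tracked value is the *pointer* operand;
// storing the pointer itself is a capture question, not a write through it:
//
//   store i32 0, i32* %arg        ; %arg is the pointer operand -> drop NO_WRITES
//   store i32* %arg, i32** %slot  ; %arg is the value operand   -> no change here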

} // namespace

/// -------------------- Memory Locations Attributes ---------------------------
/// Includes read-none, argmemonly, inaccessiblememonly,
/// inaccessiblememorargmemonly
/// ----------------------------------------------------------------------------

std::string AAMemoryLocation::getMemoryLocationsAsStr(
    AAMemoryLocation::MemoryLocationsKind MLK) {
  if (0 == (MLK & AAMemoryLocation::NO_LOCATIONS))
    return "all memory";
  if (MLK == AAMemoryLocation::NO_LOCATIONS)
    return "no memory";
  std::string S = "memory:";
  if (0 == (MLK & AAMemoryLocation::NO_LOCAL_MEM))
    S += "stack,";
  if (0 == (MLK & AAMemoryLocation::NO_CONST_MEM))
    S += "constant,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_INTERNAL_MEM))
    S += "internal global,";
  if (0 == (MLK & AAMemoryLocation::NO_GLOBAL_EXTERNAL_MEM))
    S += "external global,";
  if (0 == (MLK & AAMemoryLocation::NO_ARGUMENT_MEM))
    S += "argument,";
  if (0 == (MLK & AAMemoryLocation::NO_INACCESSIBLE_MEM))
    S += "inaccessible,";
  if (0 == (MLK & AAMemoryLocation::NO_MALLOCED_MEM))
    S += "malloced,";
  if (0 == (MLK & AAMemoryLocation::NO_UNKOWN_MEM))
    S += "unknown,";
  S.pop_back();
  return S;
}
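
// Illustrative example (for exposition): the NO_* bits name locations that
// are *not* accessed, so a state in which only NO_LOCAL_MEM and NO_CONST_MEM
// are still unset prints as "memory:stack,constant" (pop_back() above removes
// the trailing comma), while a state with all NO_* bits set prints as
// "no memory".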

namespace {
struct AAMemoryLocationImpl : public AAMemoryLocation {

  AAMemoryLocationImpl(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocation(IRP, A), Allocator(A.Allocator) {
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      AccessKind2Accesses[u] = nullptr;
  }

  ~AAMemoryLocationImpl() {
    // The AccessSets are allocated via a BumpPtrAllocator, we call
    // the destructor manually.
    for (unsigned u = 0; u < llvm::CTLog2<VALID_STATE>(); ++u)
      if (AccessKind2Accesses[u])
        AccessKind2Accesses[u]->~AccessSet();
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    intersectAssumedBits(BEST_STATE);
    getKnownStateFromValue(A, getIRPosition(), getState());
    AAMemoryLocation::initialize(A);
  }

  /// Return the memory behavior information encoded in the IR for \p IRP.
  static void getKnownStateFromValue(Attributor &A, const IRPosition &IRP,
                                     BitIntegerState &State,
                                     bool IgnoreSubsumingPositions = false) {
    // For internal functions we ignore `argmemonly` and
    // `inaccessiblememorargmemonly` as we might break it via interprocedural
    // constant propagation. It is unclear if this is the best way but it is
    // unlikely this will cause real performance problems. If we are deriving
    // attributes for the anchor function we even remove the attribute in
    // addition to ignoring it.
    bool UseArgMemOnly = true;
    Function *AnchorFn = IRP.getAnchorScope();
    if (AnchorFn && A.isRunOn(*AnchorFn))
      UseArgMemOnly = !AnchorFn->hasLocalLinkage();

    SmallVector<Attribute, 2> Attrs;
    IRP.getAttrs(AttrKinds, Attrs, IgnoreSubsumingPositions);
    for (const Attribute &Attr : Attrs) {
      switch (Attr.getKindAsEnum()) {
      case Attribute::ReadNone:
        State.addKnownBits(NO_LOCAL_MEM | NO_CONST_MEM);
        break;
      case Attribute::InaccessibleMemOnly:
        State.addKnownBits(inverseLocation(NO_INACCESSIBLE_MEM, true, true));
        break;
      case Attribute::ArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::ArgMemOnly});
        break;
      case Attribute::InaccessibleMemOrArgMemOnly:
        if (UseArgMemOnly)
          State.addKnownBits(inverseLocation(
              NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true));
        else
          IRP.removeAttrs({Attribute::InaccessibleMemOrArgMemOnly});
        break;
      default:
        llvm_unreachable("Unexpected attribute!");
      }
    }
  }

  /// See AbstractAttribute::getDeducedAttributes(...).
  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    assert(Attrs.size() == 0);
    if (isAssumedReadNone()) {
      Attrs.push_back(Attribute::get(Ctx, Attribute::ReadNone));
    } else if (getIRPosition().getPositionKind() == IRPosition::IRP_FUNCTION) {
      if (isAssumedInaccessibleMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::InaccessibleMemOnly));
      else if (isAssumedArgMemOnly())
        Attrs.push_back(Attribute::get(Ctx, Attribute::ArgMemOnly));
      else if (isAssumedInaccessibleOrArgMemOnly())
        Attrs.push_back(
            Attribute::get(Ctx, Attribute::InaccessibleMemOrArgMemOnly));
    }
    assert(Attrs.size() <= 1);
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    const IRPosition &IRP = getIRPosition();

    // Check if we would improve the existing attributes first.
    SmallVector<Attribute, 4> DeducedAttrs;
    getDeducedAttributes(IRP.getAnchorValue().getContext(), DeducedAttrs);
    if (llvm::all_of(DeducedAttrs, [&](const Attribute &Attr) {
          return IRP.hasAttr(Attr.getKindAsEnum(),
                             /* IgnoreSubsumingPositions */ true);
        }))
      return ChangeStatus::UNCHANGED;

    // Clear existing attributes.
    IRP.removeAttrs(AttrKinds);
    if (isAssumedReadNone())
      IRP.removeAttrs(AAMemoryBehaviorImpl::AttrKinds);

    // Use the generic manifest method.
    return IRAttribute::manifest(A);
  }

  /// See AAMemoryLocation::checkForAllAccessesToMemoryKind(...).
  bool checkForAllAccessesToMemoryKind(
      function_ref<bool(const Instruction *, const Value *, AccessKind,
                        MemoryLocationsKind)>
          Pred,
      MemoryLocationsKind RequestedMLK) const override {
    if (!isValidState())
      return false;

    MemoryLocationsKind AssumedMLK = getAssumedNotAccessedLocation();
    if (AssumedMLK == NO_LOCATIONS)
      return true;

    unsigned Idx = 0;
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS;
         CurMLK *= 2, ++Idx) {
      if (CurMLK & RequestedMLK)
        continue;

      if (const AccessSet *Accesses = AccessKind2Accesses[Idx])
        for (const AccessInfo &AI : *Accesses)
          if (!Pred(AI.I, AI.Ptr, AI.Kind, CurMLK))
            return false;
    }

    return true;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    // If we give up and indicate a pessimistic fixpoint this instruction will
    // become an access for all potential access kinds:
    // TODO: Add pointers for argmemonly and globals to improve the results of
    //       checkForAllAccessesToMemoryKind.
    bool Changed = false;
    MemoryLocationsKind KnownMLK = getKnown();
    Instruction *I = dyn_cast<Instruction>(&getAssociatedValue());
    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2)
      if (!(CurMLK & KnownMLK))
        updateStateAndAccessesMap(getState(), CurMLK, I, nullptr, Changed,
                                  getAccessKindFromInst(I));
    return AAMemoryLocation::indicatePessimisticFixpoint();
  }

protected:
  /// Helper struct to tie together an instruction that has a read or write
  /// effect with the pointer it accesses (if any).
  struct AccessInfo {

    /// The instruction that caused the access.
    const Instruction *I;

    /// The base pointer that is accessed, or null if unknown.
    const Value *Ptr;

    /// The kind of access (read/write/read+write).
    AccessKind Kind;

    bool operator==(const AccessInfo &RHS) const {
      return I == RHS.I && Ptr == RHS.Ptr && Kind == RHS.Kind;
    }
    bool operator()(const AccessInfo &LHS, const AccessInfo &RHS) const {
      if (LHS.I != RHS.I)
        return LHS.I < RHS.I;
      if (LHS.Ptr != RHS.Ptr)
        return LHS.Ptr < RHS.Ptr;
      if (LHS.Kind != RHS.Kind)
        return LHS.Kind < RHS.Kind;
      return false;
    }
  };

  /// Mapping from *single* memory location kinds, e.g., LOCAL_MEM with the
  /// value of NO_LOCAL_MEM, to the accesses encountered for this memory kind.
  using AccessSet = SmallSet<AccessInfo, 2, AccessInfo>;
  AccessSet *AccessKind2Accesses[llvm::CTLog2<VALID_STATE>()];

  /// Categorize the pointer arguments of CB that might access memory in
  /// AccessedLoc and update the state and access map accordingly.
  void
  categorizeArgumentPointerLocations(Attributor &A, CallBase &CB,
                                     AAMemoryLocation::StateType &AccessedLocs,
                                     bool &Changed);

  /// Return the kind(s) of location that may be accessed by \p V.
  AAMemoryLocation::MemoryLocationsKind
  categorizeAccessedLocations(Attributor &A, Instruction &I, bool &Changed);

  /// Return the access kind as determined by \p I.
  AccessKind getAccessKindFromInst(const Instruction *I) {
    AccessKind AK = READ_WRITE;
    if (I) {
      AK = I->mayReadFromMemory() ? READ : NONE;
      AK = AccessKind(AK | (I->mayWriteToMemory() ? WRITE : NONE));
    }
    return AK;
  }
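
  // Illustrative example (for exposition): a LoadInst yields READ, a
  // StoreInst yields WRITE, a call that may both read and write yields
  // READ_WRITE, and a null instruction conservatively yields READ_WRITE.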

  /// Update the state \p State and the AccessKind2Accesses given that \p I is
  /// an access of kind \p AK to a \p MLK memory location with the access
  /// pointer \p Ptr.
  void updateStateAndAccessesMap(AAMemoryLocation::StateType &State,
                                 MemoryLocationsKind MLK, const Instruction *I,
                                 const Value *Ptr, bool &Changed,
                                 AccessKind AK = READ_WRITE) {

    assert(isPowerOf2_32(MLK) && "Expected a single location set!");
    auto *&Accesses = AccessKind2Accesses[llvm::Log2_32(MLK)];
    if (!Accesses)
      Accesses = new (Allocator) AccessSet();
    Changed |= Accesses->insert(AccessInfo{I, Ptr, AK}).second;
    State.removeAssumedBits(MLK);
  }
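
  // Illustrative example (for exposition): the location kinds are one-hot
  // bits, so llvm::Log2_32 maps each kind to a dense array slot (MLK == 1 ->
  // slot 0, MLK == 2 -> slot 1, ...), and the corresponding access set is
  // lazily bump-allocated the first time a kind is recorded.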

  /// Determine the underlying locations kinds for \p Ptr, e.g., globals or
  /// arguments, and update the state and access map accordingly.
  void categorizePtrValue(Attributor &A, const Instruction &I, const Value &Ptr,
                          AAMemoryLocation::StateType &State, bool &Changed);

  /// Used to allocate access sets.
  BumpPtrAllocator &Allocator;

  /// The set of IR attributes AAMemoryLocation deals with.
  static const Attribute::AttrKind AttrKinds[4];
};

const Attribute::AttrKind AAMemoryLocationImpl::AttrKinds[] = {
    Attribute::ReadNone, Attribute::InaccessibleMemOnly, Attribute::ArgMemOnly,
    Attribute::InaccessibleMemOrArgMemOnly};

void AAMemoryLocationImpl::categorizePtrValue(
    Attributor &A, const Instruction &I, const Value &Ptr,
    AAMemoryLocation::StateType &State, bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize pointer locations for "
                    << Ptr << " ["
                    << getMemoryLocationsAsStr(State.getAssumed()) << "]\n");

  auto StripGEPCB = [](Value *V) -> Value * {
    auto *GEP = dyn_cast<GEPOperator>(V);
    while (GEP) {
      V = GEP->getPointerOperand();
      GEP = dyn_cast<GEPOperator>(V);
    }
    return V;
  };

  auto VisitValueCB = [&](Value &V, const Instruction *,
                          AAMemoryLocation::StateType &T,
                          bool Stripped) -> bool {
    // TODO: recognize the TBAA used for constant accesses.
    MemoryLocationsKind MLK = NO_LOCATIONS;
    assert(!isa<GEPOperator>(V) && "GEPs should have been stripped.");
    if (isa<UndefValue>(V))
      return true;
    if (auto *Arg = dyn_cast<Argument>(&V)) {
      if (Arg->hasByValAttr())
        MLK = NO_LOCAL_MEM;
      else
        MLK = NO_ARGUMENT_MEM;
    } else if (auto *GV = dyn_cast<GlobalValue>(&V)) {
      // Reading constant memory is not treated as a read "effect" by the
      // function attr pass, so we won't either. Constants defined by TBAA are
      // similar. (We know we do not write it because it is constant.)
      if (auto *GVar = dyn_cast<GlobalVariable>(GV))
        if (GVar->isConstant())
          return true;

      if (GV->hasLocalLinkage())
        MLK = NO_GLOBAL_INTERNAL_MEM;
      else
        MLK = NO_GLOBAL_EXTERNAL_MEM;
    } else if (isa<ConstantPointerNull>(V) &&
               !NullPointerIsDefined(getAssociatedFunction(),
                                     V.getType()->getPointerAddressSpace())) {
      return true;
    } else if (isa<AllocaInst>(V)) {
      MLK = NO_LOCAL_MEM;
    } else if (const auto *CB = dyn_cast<CallBase>(&V)) {
      const auto &NoAliasAA =
          A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(*CB));
      if (NoAliasAA.isAssumedNoAlias())
        MLK = NO_MALLOCED_MEM;
      else
        MLK = NO_UNKOWN_MEM;
    } else {
      MLK = NO_UNKOWN_MEM;
    }

    assert(MLK != NO_LOCATIONS && "No location specified!");
    updateStateAndAccessesMap(T, MLK, &I, &V, Changed,
                              getAccessKindFromInst(&I));
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Ptr value categorized: " << V
                      << " -> " << getMemoryLocationsAsStr(T.getAssumed())
                      << "\n");
    return true;
  };

  if (!genericValueTraversal<AAMemoryLocation, AAMemoryLocation::StateType>(
          A, IRPosition::value(Ptr), *this, State, VisitValueCB, getCtxI(),
          /* UseValueSimplify */ true,
          /* MaxValues */ 32, StripGEPCB)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Pointer locations not categorized\n");
    updateStateAndAccessesMap(State, NO_UNKOWN_MEM, &I, nullptr, Changed,
                              getAccessKindFromInst(&I));
  } else {
    LLVM_DEBUG(
        dbgs()
        << "[AAMemoryLocation] Accessed locations with pointer locations: "
        << getMemoryLocationsAsStr(State.getAssumed()) << "\n");
  }
}

void AAMemoryLocationImpl::categorizeArgumentPointerLocations(
    Attributor &A, CallBase &CB, AAMemoryLocation::StateType &AccessedLocs,
    bool &Changed) {
  for (unsigned ArgNo = 0, E = CB.getNumArgOperands(); ArgNo < E; ++ArgNo) {

    // Skip non-pointer arguments.
    const Value *ArgOp = CB.getArgOperand(ArgNo);
    if (!ArgOp->getType()->isPtrOrPtrVectorTy())
      continue;

    // Skip readnone arguments.
    const IRPosition &ArgOpIRP = IRPosition::callsite_argument(CB, ArgNo);
    const auto &ArgOpMemLocationAA = A.getAAFor<AAMemoryBehavior>(
        *this, ArgOpIRP, /* TrackDependence */ true, DepClassTy::OPTIONAL);

    if (ArgOpMemLocationAA.isAssumedReadNone())
      continue;

    // Categorize potentially accessed pointer arguments as if there was an
    // access instruction with them as pointer.
    categorizePtrValue(A, CB, *ArgOp, AccessedLocs, Changed);
  }
}

AAMemoryLocation::MemoryLocationsKind
AAMemoryLocationImpl::categorizeAccessedLocations(Attributor &A, Instruction &I,
                                                  bool &Changed) {
  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize accessed locations for "
                    << I << "\n");

  AAMemoryLocation::StateType AccessedLocs;
  AccessedLocs.intersectAssumedBits(NO_LOCATIONS);

  if (auto *CB = dyn_cast<CallBase>(&I)) {

    // First check if we assume any accessed memory is visible.
    const auto &CBMemLocationAA =
        A.getAAFor<AAMemoryLocation>(*this, IRPosition::callsite_function(*CB));
    LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Categorize call site: " << I
                      << " [" << CBMemLocationAA << "]\n");

    if (CBMemLocationAA.isAssumedReadNone())
      return NO_LOCATIONS;

    if (CBMemLocationAA.isAssumedInaccessibleMemOnly()) {
      updateStateAndAccessesMap(AccessedLocs, NO_INACCESSIBLE_MEM, &I, nullptr,
                                Changed, getAccessKindFromInst(&I));
      return AccessedLocs.getAssumed();
    }

    uint32_t CBAssumedNotAccessedLocs =
        CBMemLocationAA.getAssumedNotAccessedLocation();

    // Set the argmemonly and global bit as we handle them separately below.
    uint32_t CBAssumedNotAccessedLocsNoArgMem =
        CBAssumedNotAccessedLocs | NO_ARGUMENT_MEM | NO_GLOBAL_MEM;

    for (MemoryLocationsKind CurMLK = 1; CurMLK < NO_LOCATIONS; CurMLK *= 2) {
      if (CBAssumedNotAccessedLocsNoArgMem & CurMLK)
        continue;
      updateStateAndAccessesMap(AccessedLocs, CurMLK, &I, nullptr, Changed,
                                getAccessKindFromInst(&I));
    }

    // Now handle global memory if it might be accessed. This is slightly
    // tricky as NO_GLOBAL_MEM has multiple bits set.
    bool HasGlobalAccesses = ((~CBAssumedNotAccessedLocs) & NO_GLOBAL_MEM);
    if (HasGlobalAccesses) {
      auto AccessPred = [&](const Instruction *, const Value *Ptr,
                            AccessKind Kind, MemoryLocationsKind MLK) {
        updateStateAndAccessesMap(AccessedLocs, MLK, &I, Ptr, Changed,
                                  getAccessKindFromInst(&I));
        return true;
      };
      if (!CBMemLocationAA.checkForAllAccessesToMemoryKind(
              AccessPred, inverseLocation(NO_GLOBAL_MEM, false, false)))
        return AccessedLocs.getWorstState();
    }

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state before argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    // Now handle argument memory if it might be accessed.
    bool HasArgAccesses = ((~CBAssumedNotAccessedLocs) & NO_ARGUMENT_MEM);
    if (HasArgAccesses)
      categorizeArgumentPointerLocations(A, *CB, AccessedLocs, Changed);

    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Accessed state after argument handling: "
               << getMemoryLocationsAsStr(AccessedLocs.getAssumed()) << "\n");

    return AccessedLocs.getAssumed();
  }

  if (const Value *Ptr = getPointerOperand(&I, /* AllowVolatile */ true)) {
    LLVM_DEBUG(
        dbgs() << "[AAMemoryLocation] Categorize memory access with pointer: "
               << I << " [" << *Ptr << "]\n");
    categorizePtrValue(A, I, *Ptr, AccessedLocs, Changed);
    return AccessedLocs.getAssumed();
  }

  LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Failed to categorize instruction: "
                    << I << "\n");
  updateStateAndAccessesMap(AccessedLocs, NO_UNKOWN_MEM, &I, nullptr, Changed,
                            getAccessKindFromInst(&I));
  return AccessedLocs.getAssumed();
}

/// An AA to represent the memory behavior function attributes.
struct AAMemoryLocationFunction final : public AAMemoryLocationImpl {
  AAMemoryLocationFunction(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    const auto &MemBehaviorAA = A.getAAFor<AAMemoryBehavior>(
        *this, getIRPosition(), /* TrackDependence */ false);
    if (MemBehaviorAA.isAssumedReadNone()) {
      if (MemBehaviorAA.isKnownReadNone())
        return indicateOptimisticFixpoint();
      assert(isAssumedReadNone() &&
             "AAMemoryLocation was not read-none but AAMemoryBehavior was!");
      A.recordDependence(MemBehaviorAA, *this, DepClassTy::OPTIONAL);
      return ChangeStatus::UNCHANGED;
    }

    // The current assumed state used to determine a change.
    auto AssumedState = getAssumed();
    bool Changed = false;

    auto CheckRWInst = [&](Instruction &I) {
      MemoryLocationsKind MLK = categorizeAccessedLocations(A, I, Changed);
      LLVM_DEBUG(dbgs() << "[AAMemoryLocation] Accessed locations for " << I
                        << ": " << getMemoryLocationsAsStr(MLK) << "\n");
      removeAssumedBits(inverseLocation(MLK, false, false));
      // Stop once only the valid bit is set in the *not assumed location*,
      // i.e., once we don't actually exclude any memory locations in the
      // state anymore.
      return getAssumedNotAccessedLocation() != VALID_STATE;
    };

    if (!A.checkForAllReadWriteInstructions(CheckRWInst, *this))
      return indicatePessimisticFixpoint();

    Changed |= AssumedState != getAssumed();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_FN_ATTR(readnone)
    else if (isAssumedArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(argmemonly)
    else if (isAssumedInaccessibleMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememonly)
    else if (isAssumedInaccessibleOrArgMemOnly())
      STATS_DECLTRACK_FN_ATTR(inaccessiblememorargmemonly)
  }
};

/// AAMemoryLocation attribute for call sites.
struct AAMemoryLocationCallSite final : AAMemoryLocationImpl {
  AAMemoryLocationCallSite(const IRPosition &IRP, Attributor &A)
      : AAMemoryLocationImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAMemoryLocationImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F || F->isDeclaration())
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call site arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAMemoryLocation>(*this, FnPos);
    bool Changed = false;
    auto AccessPred = [&](const Instruction *I, const Value *Ptr,
                          AccessKind Kind, MemoryLocationsKind MLK) {
      updateStateAndAccessesMap(getState(), MLK, I, Ptr, Changed,
                                getAccessKindFromInst(I));
      return true;
    };
    if (!FnAA.checkForAllAccessesToMemoryKind(AccessPred, ALL_LOCATIONS))
      return indicatePessimisticFixpoint();
    return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    if (isAssumedReadNone())
      STATS_DECLTRACK_CS_ATTR(readnone)
  }
};

/// ------------------ Value Constant Range Attribute -------------------------

struct AAValueConstantRangeImpl : AAValueConstantRange {
  using StateType = IntegerRangeState;
  AAValueConstantRangeImpl(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRange(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << "range(" << getBitWidth() << ")<";
    getKnown().print(OS);
    OS << " / ";
    getAssumed().print(OS);
    OS << ">";
    return OS.str();
  }

  /// Helper function to get a SCEV expr for the associated value at program
  /// point \p I.
  const SCEV *getSCEV(Attributor &A, const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return nullptr;

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    LoopInfo *LI = A.getInfoCache().getAnalysisResultForFunction<LoopAnalysis>(
        *getAnchorScope());

    if (!SE || !LI)
      return nullptr;

    const SCEV *S = SE->getSCEV(&getAssociatedValue());
    if (!I)
      return S;

    return SE->getSCEVAtScope(S, LI->getLoopFor(I->getParent()));
  }

  /// Helper function to get a range from SCEV for the associated value at
  /// program point \p I.
  ConstantRange getConstantRangeFromSCEV(Attributor &A,
                                         const Instruction *I = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    ScalarEvolution *SE =
        A.getInfoCache().getAnalysisResultForFunction<ScalarEvolutionAnalysis>(
            *getAnchorScope());

    const SCEV *S = getSCEV(A, I);
    if (!SE || !S)
      return getWorstState(getBitWidth());

    return SE->getUnsignedRange(S);
  }

  /// Helper function to get a range from LVI for the associated value at
  /// program point \p I.
  ConstantRange
  getConstantRangeFromLVI(Attributor &A,
                          const Instruction *CtxI = nullptr) const {
    if (!getAnchorScope())
      return getWorstState(getBitWidth());

    LazyValueInfo *LVI =
        A.getInfoCache().getAnalysisResultForFunction<LazyValueAnalysis>(
            *getAnchorScope());

    if (!LVI || !CtxI)
      return getWorstState(getBitWidth());
    return LVI->getConstantRange(&getAssociatedValue(),
                                 const_cast<Instruction *>(CtxI));
  }

  /// See AAValueConstantRange::getKnownConstantRange(..).
  ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const override {
    if (!CtxI || CtxI == getCtxI())
      return getKnown();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getKnown().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AAValueConstantRange::getAssumedConstantRange(..).
  ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const override {
    // TODO: Make SCEV use Attributor assumption.
    //       We may be able to bound a variable range via assumptions in
    //       Attributor. ex.) If x is assumed to be in [1, 3] and y is known to
    //       evolve to x^2 + x, then we can say that y is in [2, 12].

    if (!CtxI || CtxI == getCtxI())
      return getAssumed();

    ConstantRange LVIR = getConstantRangeFromLVI(A, CtxI);
    ConstantRange SCEVR = getConstantRangeFromSCEV(A, CtxI);
    return getAssumed().intersectWith(SCEVR).intersectWith(LVIR);
  }

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    // Intersect a range given by SCEV.
    intersectKnown(getConstantRangeFromSCEV(A, getCtxI()));

    // Intersect a range given by LVI.
    intersectKnown(getConstantRangeFromLVI(A, getCtxI()));
  }

  /// Helper function to create MDNode for range metadata.
  static MDNode *
  getMDNodeForConstantRange(Type *Ty, LLVMContext &Ctx,
                            const ConstantRange &AssumedConstantRange) {
    Metadata *LowAndHigh[] = {ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getLower())),
                              ConstantAsMetadata::get(ConstantInt::get(
                                  Ty, AssumedConstantRange.getUpper()))};
    return MDNode::get(Ctx, LowAndHigh);
  }
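
  // Illustrative example (for exposition): for an i32 value with assumed
  // range [0, 10) this creates the metadata node !{i32 0, i32 10}, which
  // manifest() below attaches as !range metadata to suitable call or load
  // instructions.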

  /// Return true if \p Assumed is included in \p KnownRanges.
  static bool isBetterRange(const ConstantRange &Assumed, MDNode *KnownRanges) {

    if (Assumed.isFullSet())
      return false;

    if (!KnownRanges)
      return true;

    // If multiple ranges are annotated in the IR, we give up on annotating
    // the assumed range for now.

    // TODO: If there exists a known range which contains the assumed range,
    // we can say the assumed range is better.
    if (KnownRanges->getNumOperands() > 2)
      return false;

    ConstantInt *Lower =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(0));
    ConstantInt *Upper =
        mdconst::extract<ConstantInt>(KnownRanges->getOperand(1));

    ConstantRange Known(Lower->getValue(), Upper->getValue());
    return Known.contains(Assumed) && Known != Assumed;
  }
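
  // Illustrative example (for exposition): with existing metadata
  // !{i32 0, i32 100}, i.e., Known = [0, 100), and an assumed range of
  // [2, 5), Known strictly contains Assumed, so isBetterRange returns true
  // and the tighter range may replace the old annotation.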

  /// Helper function to set range metadata.
  static bool
  setRangeMetadataIfisBetterRange(Instruction *I,
                                  const ConstantRange &AssumedConstantRange) {
    auto *OldRangeMD = I->getMetadata(LLVMContext::MD_range);
    if (isBetterRange(AssumedConstantRange, OldRangeMD)) {
      if (!AssumedConstantRange.isEmptySet()) {
        I->setMetadata(LLVMContext::MD_range,
                       getMDNodeForConstantRange(I->getType(), I->getContext(),
                                                 AssumedConstantRange));
        return true;
      }
    }
    return false;
  }

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;
    ConstantRange AssumedConstantRange = getAssumedConstantRange(A);
    assert(!AssumedConstantRange.isFullSet() && "Invalid state");

    auto &V = getAssociatedValue();
    if (!AssumedConstantRange.isEmptySet() &&
        !AssumedConstantRange.isSingleElement()) {
      if (Instruction *I = dyn_cast<Instruction>(&V)) {
        assert(I == getCtxI() && "Should not annotate an instruction which is "
                                 "not the context instruction");
        if (isa<CallInst>(I) || isa<LoadInst>(I))
          if (setRangeMetadataIfisBetterRange(I, AssumedConstantRange))
            Changed = ChangeStatus::CHANGED;
      }
    }

    return Changed;
  }
};

struct AAValueConstantRangeArgument final
    : AAArgumentFromCallSiteArguments<
          AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState> {
  using Base = AAArgumentFromCallSiteArguments<
      AAValueConstantRange, AAValueConstantRangeImpl, IntegerRangeState>;
  AAValueConstantRangeArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(..).
  void initialize(Attributor &A) override {
    if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
      indicatePessimisticFixpoint();
    } else {
      Base::initialize(A);
    }
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(value_range)
  }
};

struct AAValueConstantRangeReturned
    : AAReturnedFromReturnedValues<AAValueConstantRange,
                                   AAValueConstantRangeImpl> {
  using Base = AAReturnedFromReturnedValues<AAValueConstantRange,
                                            AAValueConstantRangeImpl>;
  AAValueConstantRangeReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeFloating : AAValueConstantRangeImpl {
  AAValueConstantRangeFloating(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAValueConstantRangeImpl::initialize(A);
    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(ConstantRange(C->getValue()));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      // Collapse the undef state to 0.
      unionAssumed(ConstantRange(APInt(getBitWidth(), 0)));
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<CallBase>(&V))
      return;

    if (isa<BinaryOperator>(&V) || isa<CmpInst>(&V) || isa<CastInst>(&V))
      return;

    // If it is a load instruction with range metadata, use it.
    if (LoadInst *LI = dyn_cast<LoadInst>(&V))
      if (auto *RangeMD = LI->getMetadata(LLVMContext::MD_range)) {
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));
        return;
      }

    // We can work with PHI and select instructions as we traverse their
    // operands during the update.
    if (isa<SelectInst>(V) || isa<PHINode>(V))
      return;

    // Otherwise we give up.
    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] We give up: "
                      << getAssociatedValue() << "\n");
  }

  bool calculateBinaryOperator(
      Attributor &A, BinaryOperator *BinOp, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QuerriedAAs.push_back(&LHSAA);
    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);

    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QuerriedAAs.push_back(&RHSAA);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    auto AssumedRange = LHSAARange.binaryOp(BinOp->getOpcode(), RHSAARange);

    T.unionAssumed(AssumedRange);

    // TODO: Track a known state too.

    return T.isValidState();
  }
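
  // Illustrative example (hypothetical IR, for exposition): for
  //   %r = add i8 %a, %b
  // with %a assumed in [0, 10) and %b assumed in [0, 5),
  // ConstantRange::binaryOp(Instruction::Add, ...) yields [0, 14) as the
  // assumed range for %r.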

  bool calculateCastInst(
      Attributor &A, CastInst *CastI, IntegerRangeState &T,
      const Instruction *CtxI,
      SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    assert(CastI->getNumOperands() == 1 && "Expected cast to be unary!");
    // TODO: Allow non integers as well.
    Value &OpV = *CastI->getOperand(0);
    if (!OpV.getType()->isIntegerTy())
      return false;

    auto &OpAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(OpV));
    QuerriedAAs.push_back(&OpAA);
    T.unionAssumed(
        OpAA.getAssumed().castOp(CastI->getOpcode(), getState().getBitWidth()));
    return T.isValidState();
  }

  bool
  calculateCmpInst(Attributor &A, CmpInst *CmpI, IntegerRangeState &T,
                   const Instruction *CtxI,
                   SmallVectorImpl<const AAValueConstantRange *> &QuerriedAAs) {
    Value *LHS = CmpI->getOperand(0);
    Value *RHS = CmpI->getOperand(1);
    // TODO: Allow non integers as well.
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return false;

    auto &LHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*LHS));
    QuerriedAAs.push_back(&LHSAA);
    auto &RHSAA =
        A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(*RHS));
    QuerriedAAs.push_back(&RHSAA);

    auto LHSAARange = LHSAA.getAssumedConstantRange(A, CtxI);
    auto RHSAARange = RHSAA.getAssumedConstantRange(A, CtxI);

    // If one of them is an empty set, we can't decide.
    if (LHSAARange.isEmptySet() || RHSAARange.isEmptySet())
      return true;

    bool MustTrue = false, MustFalse = false;

    auto AllowedRegion =
        ConstantRange::makeAllowedICmpRegion(CmpI->getPredicate(), RHSAARange);

    auto SatisfyingRegion = ConstantRange::makeSatisfyingICmpRegion(
        CmpI->getPredicate(), RHSAARange);

    if (AllowedRegion.intersectWith(LHSAARange).isEmptySet())
      MustFalse = true;

    if (SatisfyingRegion.contains(LHSAARange))
      MustTrue = true;

    assert((!MustTrue || !MustFalse) &&
           "Either MustTrue or MustFalse should be false!");

    if (MustTrue)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 1)));
    else if (MustFalse)
      T.unionAssumed(ConstantRange(APInt(/* numBits */ 1, /* val */ 0)));
    else
      T.unionAssumed(ConstantRange(/* BitWidth */ 1, /* isFullSet */ true));

    LLVM_DEBUG(dbgs() << "[AAValueConstantRange] " << *CmpI << " " << LHSAA
                      << " " << RHSAA << "\n");

    // TODO: Track a known state too.
    return T.isValidState();
  }
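
  // Illustrative example (hypothetical IR, for exposition): for
  //   %c = icmp ult i32 %x, %y
  // with %x assumed in [0, 4) and %y assumed in [10, 20), every %x value
  // compares less than every %y value, so the satisfying region contains the
  // LHS range, MustTrue holds, and %c is assumed to be the single-element
  // range {1}.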

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            IntegerRangeState &T, bool Stripped) -> bool {
      Instruction *I = dyn_cast<Instruction>(&V);
      if (!I || isa<CallBase>(I)) {

        // If the value is not an instruction, we query the AA via the
        // Attributor.
        const auto &AA =
            A.getAAFor<AAValueConstantRange>(*this, IRPosition::value(V));

        // No clamp operator is used so that the program point CtxI can be
        // utilized.
        T.unionAssumed(AA.getAssumedConstantRange(A, CtxI));

        return T.isValidState();
      }

      SmallVector<const AAValueConstantRange *, 4> QuerriedAAs;
      if (auto *BinOp = dyn_cast<BinaryOperator>(I)) {
        if (!calculateBinaryOperator(A, BinOp, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CmpI = dyn_cast<CmpInst>(I)) {
        if (!calculateCmpInst(A, CmpI, T, CtxI, QuerriedAAs))
          return false;
      } else if (auto *CastI = dyn_cast<CastInst>(I)) {
        if (!calculateCastInst(A, CastI, T, CtxI, QuerriedAAs))
          return false;
      } else {
        // Give up with other instructions.
        // TODO: Add other instructions.

        T.indicatePessimisticFixpoint();
        return false;
      }

      // Catch circular reasoning in a pessimistic way for now.
      // TODO: Check how the range evolves and if we stripped anything, see also
      //       AADereferenceable or AAAlign for similar situations.
      for (const AAValueConstantRange *QueriedAA : QuerriedAAs) {
        if (QueriedAA != this)
          continue;
        // If we are in a steady state we do not need to worry.
        if (T.getAssumed() == getState().getAssumed())
          continue;
        T.indicatePessimisticFixpoint();
      }

      return T.isValidState();
    };

    IntegerRangeState T(getBitWidth());

    if (!genericValueTraversal<AAValueConstantRange, IntegerRangeState>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI(),
            /* UseValueSimplify */ false))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(value_range)
  }
};
7345 
7346 struct AAValueConstantRangeFunction : AAValueConstantRangeImpl {
AAValueConstantRangeFunction__anon0ce335533911::AAValueConstantRangeFunction7347   AAValueConstantRangeFunction(const IRPosition &IRP, Attributor &A)
7348       : AAValueConstantRangeImpl(IRP, A) {}
7349 
7350   /// See AbstractAttribute::initialize(...).
updateImpl__anon0ce335533911::AAValueConstantRangeFunction7351   ChangeStatus updateImpl(Attributor &A) override {
7352     llvm_unreachable("AAValueConstantRange(Function|CallSite)::updateImpl will "
7353                      "not be called");
7354   }
7355 
7356   /// See AbstractAttribute::trackStatistics()
trackStatistics__anon0ce335533911::AAValueConstantRangeFunction7357   void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(value_range) }
7358 };
7359 
7360 struct AAValueConstantRangeCallSite : AAValueConstantRangeFunction {
AAValueConstantRangeCallSite__anon0ce335533911::AAValueConstantRangeCallSite7361   AAValueConstantRangeCallSite(const IRPosition &IRP, Attributor &A)
7362       : AAValueConstantRangeFunction(IRP, A) {}
7363 
7364   /// See AbstractAttribute::trackStatistics()
trackStatistics__anon0ce335533911::AAValueConstantRangeCallSite7365   void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(value_range) }
7366 };

struct AAValueConstantRangeCallSiteReturned
    : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                     AAValueConstantRangeImpl> {
  AAValueConstantRangeCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AAValueConstantRange,
                                       AAValueConstantRangeImpl>(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // If it is a call instruction with range metadata, use the metadata.
    if (CallInst *CI = dyn_cast<CallInst>(&getAssociatedValue()))
      if (auto *RangeMD = CI->getMetadata(LLVMContext::MD_range))
        intersectKnown(getConstantRangeFromMetadata(*RangeMD));

    AAValueConstantRangeImpl::initialize(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(value_range)
  }
};

struct AAValueConstantRangeCallSiteArgument : AAValueConstantRangeFloating {
  AAValueConstantRangeCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAValueConstantRangeFloating(IRP, A) {}

  /// See AbstractAttribute::manifest()
  ChangeStatus manifest(Attributor &A) override {
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(value_range)
  }
};

/// ------------------ Potential Values Attribute -------------------------

struct AAPotentialValuesImpl : AAPotentialValues {
  using StateType = PotentialConstantIntValuesState;

  AAPotentialValuesImpl(const IRPosition &IRP, Attributor &A)
      : AAPotentialValues(IRP, A) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    std::string Str;
    llvm::raw_string_ostream OS(Str);
    OS << getState();
    return OS.str();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }
};

struct AAPotentialValuesArgument final
    : AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
                                      PotentialConstantIntValuesState> {
  using Base =
      AAArgumentFromCallSiteArguments<AAPotentialValues, AAPotentialValuesImpl,
                                      PotentialConstantIntValuesState>;
  AAPotentialValuesArgument(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (!getAnchorScope() || getAnchorScope()->isDeclaration()) {
      indicatePessimisticFixpoint();
    } else {
      Base::initialize(A);
    }
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(potential_values)
  }
};

struct AAPotentialValuesReturned
    : AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl> {
  using Base =
      AAReturnedFromReturnedValues<AAPotentialValues, AAPotentialValuesImpl>;
  AAPotentialValuesReturned(const IRPosition &IRP, Attributor &A)
      : Base(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(potential_values)
  }
};

struct AAPotentialValuesFloating : AAPotentialValuesImpl {
  AAPotentialValuesFloating(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(C->getValue());
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      unionAssumedWithUndef();
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<BinaryOperator>(&V) || isa<ICmpInst>(&V) || isa<CastInst>(&V))
      return;

    if (isa<SelectInst>(&V) || isa<PHINode>(&V))
      return;

    indicatePessimisticFixpoint();

    LLVM_DEBUG(dbgs() << "[AAPotentialValues] We give up: "
                      << getAssociatedValue() << "\n");
  }

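  /// Evaluate the predicate of \p ICI on the concrete operand pair
  /// (\p LHS, \p RHS); e.g., `icmp ult` with LHS = 2 and RHS = 3 yields true.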
  static bool calculateICmpInst(const ICmpInst *ICI, const APInt &LHS,
                                const APInt &RHS) {
    ICmpInst::Predicate Pred = ICI->getPredicate();
    switch (Pred) {
    case ICmpInst::ICMP_UGT:
      return LHS.ugt(RHS);
    case ICmpInst::ICMP_SGT:
      return LHS.sgt(RHS);
    case ICmpInst::ICMP_EQ:
      return LHS.eq(RHS);
    case ICmpInst::ICMP_UGE:
      return LHS.uge(RHS);
    case ICmpInst::ICMP_SGE:
      return LHS.sge(RHS);
    case ICmpInst::ICMP_ULT:
      return LHS.ult(RHS);
    case ICmpInst::ICMP_SLT:
      return LHS.slt(RHS);
    case ICmpInst::ICMP_NE:
      return LHS.ne(RHS);
    case ICmpInst::ICMP_ULE:
      return LHS.ule(RHS);
    case ICmpInst::ICMP_SLE:
      return LHS.sle(RHS);
    default:
      llvm_unreachable("Invalid ICmp predicate!");
    }
  }

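  /// Apply the integer cast \p CI to the concrete source value \p Src,
  /// producing a value of width \p ResultBitWidth; e.g., `trunc i32 300 to
  /// i8` yields 44 (300 modulo 2^8), while sext/zext widen with sign or zero
  /// bits.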
  static APInt calculateCastInst(const CastInst *CI, const APInt &Src,
                                 uint32_t ResultBitWidth) {
    Instruction::CastOps CastOp = CI->getOpcode();
    switch (CastOp) {
    default:
      llvm_unreachable("unsupported or not integer cast");
    case Instruction::Trunc:
      return Src.trunc(ResultBitWidth);
    case Instruction::SExt:
      return Src.sext(ResultBitWidth);
    case Instruction::ZExt:
      return Src.zext(ResultBitWidth);
    case Instruction::BitCast:
      return Src;
    }
  }

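  /// Evaluate \p BinOp on the concrete operand pair (\p LHS, \p RHS).
  /// \p Unsupported is set when the opcode is not handled. \p SkipOperation
  /// is set when the pair would be immediate UB, e.g. `udiv i32 %x, 0`, in
  /// which case the caller must discard the returned value.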
  static APInt calculateBinaryOperator(const BinaryOperator *BinOp,
                                       const APInt &LHS, const APInt &RHS,
                                       bool &SkipOperation, bool &Unsupported) {
    Instruction::BinaryOps BinOpcode = BinOp->getOpcode();
    // Unsupported is set to true when the binary operator is not supported.
    // SkipOperation is set to true when UB occurs with the given operand pair
    // (LHS, RHS).
    // TODO: we should look at nsw and nuw keywords to handle operations
    //       that create poison or undef values.
    switch (BinOpcode) {
    default:
      Unsupported = true;
      return LHS;
    case Instruction::Add:
      return LHS + RHS;
    case Instruction::Sub:
      return LHS - RHS;
    case Instruction::Mul:
      return LHS * RHS;
    case Instruction::UDiv:
      if (RHS.isNullValue()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.udiv(RHS);
    case Instruction::SDiv:
      if (RHS.isNullValue()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.sdiv(RHS);
    case Instruction::URem:
      if (RHS.isNullValue()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.urem(RHS);
    case Instruction::SRem:
      if (RHS.isNullValue()) {
        SkipOperation = true;
        return LHS;
      }
      return LHS.srem(RHS);
    case Instruction::Shl:
      return LHS.shl(RHS);
    case Instruction::LShr:
      return LHS.lshr(RHS);
    case Instruction::AShr:
      return LHS.ashr(RHS);
    case Instruction::And:
      return LHS & RHS;
    case Instruction::Or:
      return LHS | RHS;
    case Instruction::Xor:
      return LHS ^ RHS;
    }
  }

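  /// Evaluate \p BinOp on (\p LHS, \p RHS) and union the result into the
  /// assumed set. Returns false if the opcode is unsupported or the state
  /// turned invalid, e.g. once more than MaxPotentialValues values are seen.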
  bool calculateBinaryOperatorAndTakeUnion(const BinaryOperator *BinOp,
                                           const APInt &LHS, const APInt &RHS) {
    bool SkipOperation = false;
    bool Unsupported = false;
    APInt Result =
        calculateBinaryOperator(BinOp, LHS, RHS, SkipOperation, Unsupported);
    if (Unsupported)
      return false;
    // If SkipOperation is true, we can ignore this operand pair (LHS, RHS).
    if (!SkipOperation)
      unionAssumed(Result);
    return isValidState();
  }

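  /// Update the state for an icmp by evaluating its predicate on every pair
  /// of potential operand values. An operand that may be undef (while the
  /// other is not) is modeled as the all-zero value of the operand bit width.
  /// If both outcomes remain possible we fix the position pessimistically,
  /// since a boolean that may be 0 or 1 carries no information; e.g., LHS set
  /// {2, 3} against RHS set {1} under `icmp ugt` always yields true, so only
  /// 1 is assumed.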
  ChangeStatus updateWithICmpInst(Attributor &A, ICmpInst *ICI) {
    auto AssumedBefore = getAssumed();
    Value *LHS = ICI->getOperand(0);
    Value *RHS = ICI->getOperand(1);
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return indicatePessimisticFixpoint();

    auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
    if (!LHSAA.isValidState())
      return indicatePessimisticFixpoint();

    auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
    if (!RHSAA.isValidState())
      return indicatePessimisticFixpoint();

    const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
    const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();

    // TODO: make use of undef flag to limit potential values aggressively.
    bool MaybeTrue = false, MaybeFalse = false;
    const APInt Zero(RHS->getType()->getIntegerBitWidth(), 0);
    if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
      // The result of any comparison between undefs can be soundly replaced
      // with undef.
      unionAssumedWithUndef();
    } else if (LHSAA.undefIsContained()) {
      for (const APInt &R : RHSAAPVS) {
        bool CmpResult = calculateICmpInst(ICI, Zero, R);
        MaybeTrue |= CmpResult;
        MaybeFalse |= !CmpResult;
        if (MaybeTrue && MaybeFalse)
          return indicatePessimisticFixpoint();
      }
    } else if (RHSAA.undefIsContained()) {
      for (const APInt &L : LHSAAPVS) {
        bool CmpResult = calculateICmpInst(ICI, L, Zero);
        MaybeTrue |= CmpResult;
        MaybeFalse |= !CmpResult;
        if (MaybeTrue && MaybeFalse)
          return indicatePessimisticFixpoint();
      }
    } else {
      for (const APInt &L : LHSAAPVS) {
        for (const APInt &R : RHSAAPVS) {
          bool CmpResult = calculateICmpInst(ICI, L, R);
          MaybeTrue |= CmpResult;
          MaybeFalse |= !CmpResult;
          if (MaybeTrue && MaybeFalse)
            return indicatePessimisticFixpoint();
        }
      }
    }
    if (MaybeTrue)
      unionAssumed(APInt(/* numBits */ 1, /* val */ 1));
    if (MaybeFalse)
      unionAssumed(APInt(/* numBits */ 1, /* val */ 0));
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

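  /// Update the state for a select by taking the union of both arms, e.g.
  /// `select i1 %c, i32 4, i32 7` contributes {4, 7}; if both arms may be
  /// undef, the result is simply undef.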
  ChangeStatus updateWithSelectInst(Attributor &A, SelectInst *SI) {
    auto AssumedBefore = getAssumed();
    Value *LHS = SI->getTrueValue();
    Value *RHS = SI->getFalseValue();
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return indicatePessimisticFixpoint();

    // TODO: Use the assumed simplified condition value.
    auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
    if (!LHSAA.isValidState())
      return indicatePessimisticFixpoint();

    auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
    if (!RHSAA.isValidState())
      return indicatePessimisticFixpoint();

    if (LHSAA.undefIsContained() && RHSAA.undefIsContained())
      // select i1 %c, undef, undef => undef
      unionAssumedWithUndef();
    else {
      unionAssumed(LHSAA);
      unionAssumed(RHSAA);
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

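  /// Update the state for an integer cast by applying it to each potential
  /// source value, e.g. `zext i8 %x to i32` maps the set {1, 255} to
  /// {1, 255} at 32 bits; non-integer casts are given up on pessimistically.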
  ChangeStatus updateWithCastInst(Attributor &A, CastInst *CI) {
    auto AssumedBefore = getAssumed();
    if (!CI->isIntegerCast())
      return indicatePessimisticFixpoint();
    assert(CI->getNumOperands() == 1 && "Expected cast to be unary!");
    uint32_t ResultBitWidth = CI->getDestTy()->getIntegerBitWidth();
    Value *Src = CI->getOperand(0);
    auto &SrcAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*Src));
    if (!SrcAA.isValidState())
      return indicatePessimisticFixpoint();
    const DenseSet<APInt> &SrcAAPVS = SrcAA.getAssumedSet();
    if (SrcAA.undefIsContained())
      unionAssumedWithUndef();
    else {
      for (const APInt &S : SrcAAPVS) {
        APInt T = calculateCastInst(CI, S, ResultBitWidth);
        unionAssumed(T);
      }
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

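  /// Update the state for a binary operator by evaluating it on every pair
  /// of potential operand values, e.g. `add i32 %a, %b` with sets {1, 2} and
  /// {10} yields {11, 12}. Pairs that would be immediate UB are skipped and
  /// a maybe-undef operand is modeled as zero.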
  ChangeStatus updateWithBinaryOperator(Attributor &A, BinaryOperator *BinOp) {
    auto AssumedBefore = getAssumed();
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (!LHS->getType()->isIntegerTy() || !RHS->getType()->isIntegerTy())
      return indicatePessimisticFixpoint();

    auto &LHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*LHS));
    if (!LHSAA.isValidState())
      return indicatePessimisticFixpoint();

    auto &RHSAA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(*RHS));
    if (!RHSAA.isValidState())
      return indicatePessimisticFixpoint();

    const DenseSet<APInt> &LHSAAPVS = LHSAA.getAssumedSet();
    const DenseSet<APInt> &RHSAAPVS = RHSAA.getAssumedSet();
    const APInt Zero = APInt(LHS->getType()->getIntegerBitWidth(), 0);

    // TODO: make use of undef flag to limit potential values aggressively.
    if (LHSAA.undefIsContained() && RHSAA.undefIsContained()) {
      if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, Zero))
        return indicatePessimisticFixpoint();
    } else if (LHSAA.undefIsContained()) {
      for (const APInt &R : RHSAAPVS) {
        if (!calculateBinaryOperatorAndTakeUnion(BinOp, Zero, R))
          return indicatePessimisticFixpoint();
      }
    } else if (RHSAA.undefIsContained()) {
      for (const APInt &L : LHSAAPVS) {
        if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, Zero))
          return indicatePessimisticFixpoint();
      }
    } else {
      for (const APInt &L : LHSAAPVS) {
        for (const APInt &R : RHSAAPVS) {
          if (!calculateBinaryOperatorAndTakeUnion(BinOp, L, R))
            return indicatePessimisticFixpoint();
        }
      }
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

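  /// Update the state for a PHI by unioning the potential values of all
  /// incoming values, e.g. `phi i32 [ 0, %entry ], [ 1, %then ]` contributes
  /// {0, 1}.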
  ChangeStatus updateWithPHINode(Attributor &A, PHINode *PHI) {
    auto AssumedBefore = getAssumed();
    for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
      Value *IncomingValue = PHI->getIncomingValue(u);
      auto &PotentialValuesAA = A.getAAFor<AAPotentialValues>(
          *this, IRPosition::value(*IncomingValue));
      if (!PotentialValuesAA.isValidState())
        return indicatePessimisticFixpoint();
      if (PotentialValuesAA.undefIsContained())
        unionAssumedWithUndef();
      else
        unionAssumed(PotentialValuesAA.getAssumed());
    }
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    // initialize(...) reached a fixpoint for every non-instruction value, so
    // only the instruction kinds handled below can get here.
    Instruction *I = dyn_cast<Instruction>(&V);
    assert(I && "Expected an instruction, other values are fixed in "
                "initialize(...)");

    if (auto *ICI = dyn_cast<ICmpInst>(I))
      return updateWithICmpInst(A, ICI);

    if (auto *SI = dyn_cast<SelectInst>(I))
      return updateWithSelectInst(A, SI);

    if (auto *CI = dyn_cast<CastInst>(I))
      return updateWithCastInst(A, CI);

    if (auto *BinOp = dyn_cast<BinaryOperator>(I))
      return updateWithBinaryOperator(A, BinOp);

    if (auto *PHI = dyn_cast<PHINode>(I))
      return updateWithPHINode(A, PHI);

    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(potential_values)
  }
};

struct AAPotentialValuesFunction : AAPotentialValuesImpl {
  AAPotentialValuesFunction(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesImpl(IRP, A) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("AAPotentialValues(Function|CallSite)::updateImpl will "
                     "not be called");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FN_ATTR(potential_values)
  }
};

struct AAPotentialValuesCallSite : AAPotentialValuesFunction {
  AAPotentialValuesCallSite(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesFunction(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(potential_values)
  }
};

struct AAPotentialValuesCallSiteReturned
    : AACallSiteReturnedFromReturned<AAPotentialValues, AAPotentialValuesImpl> {
  AAPotentialValuesCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AAPotentialValues,
                                       AAPotentialValuesImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(potential_values)
  }
};

struct AAPotentialValuesCallSiteArgument : AAPotentialValuesFloating {
  AAPotentialValuesCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AAPotentialValuesFloating(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    Value &V = getAssociatedValue();

    if (auto *C = dyn_cast<ConstantInt>(&V)) {
      unionAssumed(C->getValue());
      indicateOptimisticFixpoint();
      return;
    }

    if (isa<UndefValue>(&V)) {
      unionAssumedWithUndef();
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    Value &V = getAssociatedValue();
    auto AssumedBefore = getAssumed();
    auto &AA = A.getAAFor<AAPotentialValues>(*this, IRPosition::value(V));
    const auto &S = AA.getAssumed();
    unionAssumed(S);
    return AssumedBefore == getAssumed() ? ChangeStatus::UNCHANGED
                                         : ChangeStatus::CHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(potential_values)
  }
};

/// ------------------------ NoUndef Attribute ---------------------------------
struct AANoUndefImpl : AANoUndef {
  AANoUndefImpl(const IRPosition &IRP, Attributor &A) : AANoUndef(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (getIRPosition().hasAttr({Attribute::NoUndef})) {
      indicateOptimisticFixpoint();
      return;
    }
    Value &V = getAssociatedValue();
    if (isa<UndefValue>(V))
      indicatePessimisticFixpoint();
    else if (isa<FreezeInst>(V))
      indicateOptimisticFixpoint();
    else if (getPositionKind() != IRPosition::IRP_RETURNED &&
             isGuaranteedNotToBeUndefOrPoison(&V))
      indicateOptimisticFixpoint();
    else
      AANoUndef::initialize(A);
  }

  /// See followUsesInMBEC
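  /// The known state is refined with what is guaranteed not to be undef or
  /// poison at the use site; casts and GEPs are followed further because
  /// their results carry undef or poison bits whenever an operand does.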
  bool followUseInMBEC(Attributor &A, const Use *U, const Instruction *I,
                       AANoUndef::StateType &State) {
    const Value *UseV = U->get();
    const DominatorTree *DT = nullptr;
    AssumptionCache *AC = nullptr;
    InformationCache &InfoCache = A.getInfoCache();
    if (Function *F = getAnchorScope()) {
      DT = InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(*F);
      AC = InfoCache.getAnalysisResultForFunction<AssumptionAnalysis>(*F);
    }
    State.setKnown(isGuaranteedNotToBeUndefOrPoison(UseV, AC, I, DT));
    bool TrackUse = false;
    // Track use for instructions which must produce undef or poison bits when
    // at least one operand contains such bits.
    if (isa<CastInst>(*I) || isa<GetElementPtrInst>(*I))
      TrackUse = true;
    return TrackUse;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noundef" : "may-undef-or-poison";
  }

  ChangeStatus manifest(Attributor &A) override {
    // We don't manifest the noundef attribute for dead positions because the
    // values associated with dead positions would be replaced with undef
    // values.
    if (A.isAssumedDead(getIRPosition(), nullptr, nullptr))
      return ChangeStatus::UNCHANGED;
    // A position whose simplified value does not have any value is
    // considered to be dead. We don't manifest noundef in such positions for
    // the same reason as above.
    auto &ValueSimplifyAA = A.getAAFor<AAValueSimplify>(
        *this, getIRPosition(), /* TrackDependence */ false);
    if (!ValueSimplifyAA.getAssumedSimplifiedValue(A).hasValue())
      return ChangeStatus::UNCHANGED;
    return AANoUndef::manifest(A);
  }
};

struct AANoUndefFloating : public AANoUndefImpl {
  AANoUndefFloating(const IRPosition &IRP, Attributor &A)
      : AANoUndefImpl(IRP, A) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoUndefImpl::initialize(A);
    if (!getState().isAtFixpoint())
      if (Instruction *CtxI = getCtxI())
        followUsesInMBEC(*this, A, getState(), *CtxI);
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto VisitValueCB = [&](Value &V, const Instruction *CtxI,
                            AANoUndef::StateType &T, bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANoUndef>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        T.indicatePessimisticFixpoint();
      } else {
        const AANoUndef::StateType &S =
            static_cast<const AANoUndef::StateType &>(AA.getState());
        T ^= S;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANoUndef, StateType>(
            A, getIRPosition(), *this, T, VisitValueCB, getCtxI()))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noundef)
  }
};

struct AANoUndefReturned final
    : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl> {
  AANoUndefReturned(const IRPosition &IRP, Attributor &A)
      : AAReturnedFromReturnedValues<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noundef) }
};

struct AANoUndefArgument final
    : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl> {
  AANoUndefArgument(const IRPosition &IRP, Attributor &A)
      : AAArgumentFromCallSiteArguments<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noundef) }
};

struct AANoUndefCallSiteArgument final : AANoUndefFloating {
  AANoUndefCallSiteArgument(const IRPosition &IRP, Attributor &A)
      : AANoUndefFloating(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noundef) }
};

struct AANoUndefCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl> {
  AANoUndefCallSiteReturned(const IRPosition &IRP, Attributor &A)
      : AACallSiteReturnedFromReturned<AANoUndef, AANoUndefImpl>(IRP, A) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noundef) }
};
} // namespace

const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AAUndefinedBehavior::ID = 0;
const char AANoAlias::ID = 0;
const char AAReachability::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
const char AAValueSimplify::ID = 0;
const char AAHeapToStack::ID = 0;
const char AAPrivatizablePtr::ID = 0;
const char AAMemoryBehavior::ID = 0;
const char AAMemoryLocation::ID = 0;
const char AAValueConstantRange::ID = 0;
const char AAPotentialValues::ID = 0;
const char AANoUndef::ID = 0;

// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new (A.Allocator) CLASS##SUFFIX(IRP, A);                              \
    ++NumAAs;                                                                  \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }
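
// As an illustration, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// below expands to an AANoUnwind::createForPosition(...) that allocates either
// an AANoUnwindFunction or an AANoUnwindCallSite from the Attributor's
// allocator and hits llvm_unreachable for every other position kind.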

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                      \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)            \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                  \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryLocation)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPrivatizablePtr)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueConstantRange)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAPotentialValues)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUndef)

CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAValueSimplify)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)

CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAHeapToStack)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReachability)
CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAUndefinedBehavior)

CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAMemoryBehavior)

#undef CREATE_FUNCTION_ONLY_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_NON_RET_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_ALL_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV