//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective-C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//

#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but all of
  // those uses are trivial, we can still consider this to be a single-use
  // value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
         return nullptr;

    return Arg;
  }

  return nullptr;
}

/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V,
                                                   const DataLayout &DL) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P, DL);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P).second)
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }
  } while (!Worklist.empty());

  return false;
}


/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
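//
// For example (illustrative IR, not taken from an actual test case):
//
//   %pool = call i8* @objc_autoreleasePoolPush()
//   call void @function_known_not_to_autorelease()
//   call void @objc_autoreleasePoolPop(i8* %pool)
//
// Nothing is autoreleased into the pool, so the push/pop pair could be
// deleted outright.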

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif

namespace {
  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }

    typedef decltype(PerPtrTopDown)::iterator top_down_ptr_iterator;
    typedef decltype(PerPtrTopDown)::const_iterator const_top_down_ptr_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }
    bool hasTopDownPtrs() const {
      return !PerPtrTopDown.empty();
    }

    typedef decltype(PerPtrBottomUp)::iterator bottom_up_ptr_iterator;
    typedef decltype(
        PerPtrBottomUp)::const_iterator const_bottom_up_ptr_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }
    bool hasBottomUpPtrs() const {
      return !PerPtrBottomUp.empty();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit by
    /// definition.
    void SetAsExit()  { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set, or if
      // all of the lower bits of Product are set.
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }
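
    // An illustrative check of the overflow logic above (hypothetical
    // values): with TopDownPathCount == 0x10000 and BottomUpPathCount ==
    // 0x10000, Product is 0x100000000, its upper 32 bits are nonzero, and
    // overflow is reported. A Product of exactly 0xffffffff is also treated
    // as overflow, since it would collide with OverflowOccurredValue.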

    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };

  const unsigned BBState::OverflowOccurredValue = 0xffffffff;
}

namespace llvm {
raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;
}

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when the
  // addition causes TopDownPathCount to equal OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
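  // (Since the path counts are unsigned, the addition above wrapped around
  // if and only if the resulting sum is smaller than the value we added in,
  // which is exactly what this comparison detects.)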
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom-up pointers when the
  // addition causes BottomUpPathCount to equal OverflowOccurredValue, even
  // though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << "    TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    DEBUG(llvm::dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
           << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
           << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
           << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  OS << "    BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    DEBUG(llvm::dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
           << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
           << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
           << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// A cache of references to runtime entry point constants.
    ARCRuntimeEntryPoints EP;

    /// A cache of MDKinds that can be passed into other functions to propagate
    /// MDKind identifiers.
    ARCMDKindCache MDKindCache;

    // This is used to track if a pointer is stored into an alloca.
    DenseSet<const Value *> MultiOwnersSet;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   ARCInstKind &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                  BlotMapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
               BlotMapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   BlotMapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

    bool
    PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                             BlotMapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases, Module *M,
                             SmallVectorImpl<Instruction *> &NewRetains,
                             SmallVectorImpl<Instruction *> &NewReleases,
                             SmallVectorImpl<Instruction *> &DeadInsts,
                             RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                             Value *Arg, bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              BlotMapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases, Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

#ifndef NDEBUG
    void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

    void getAnalysisUsage(AnalysisUsage &AU) const override;
    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    void releaseMemory() override;

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}

char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.  Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
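///
/// For example (illustrative IR, not from an actual test case): in the
/// adjacent pair
///
///   %v = call i8* @objc_autoreleaseReturnValue(i8* %p)
///   %w = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)
///
/// the autorelease and retain cancel out, and both calls can be erased.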
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV->getIterator(),
                       Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do
      --I;
    while (I != Begin && IsNoopInstruction(&*I));
    if (GetBasicARCInstKind(&*I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(&*I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(&*I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
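///
/// For example (illustrative IR, not from an actual test case): in
///
///   %v = call i8* @objc_autoreleaseReturnValue(i8* %p)
///   store i8* %v, i8** %slot
///
/// %v is never returned, so the call can be rewritten to a plain
/// objc_autorelease.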
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
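    //
    // For example (illustrative IR, not from an actual test case): a call
    // such as
    //
    //   %v = call i8* @objc_loadWeak(i8** null)
    //
    // becomes a store of undef through the null pointer (making the undefined
    // behavior explicit), and all uses of %v are replaced with undef.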
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                             MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
              "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
              << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }

    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
            << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
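    //
    // For example (illustrative IR, not from an actual test case):
    //
    //   %p = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
    //   call void @objc_release(i8* %p)
    //
    // Releasing null is a no-op, so the release only matters on the path
    // through %bb2; a clone is inserted at the end of %bb2 and the original
    // call is erased.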
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                           << *CInst << "\n"
                           "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more often.
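///
/// For instance, moving a release across an edge into a loop body would make
/// it execute once per iteration instead of once overall, over-releasing the
/// object.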
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future, this
    // code is updated to handle them.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If, bottom up, the pointer is in an S_None state, clear the sequence
      // progress, since the sequence in the bottom-up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease: {
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      }
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state were it not for the fact that we
      // are known safe, stop code motion. This is because whether or not it is
      // safe to remove RR pairs via KnownSafe is an orthogonal concept to
      // whether we are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        DEBUG(llvm::dbgs() << "        Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  case ARCInstKind::User:
    // If we have a store into an alloca of a pointer we are tracking, the
    // pointer has multiple owners, implying that we must be more conservative.
    //
    // This comes up in the context of a pointer being ``KnownSafe''. In the
    // presence of a block being initialized, the frontend will emit the
    // objc_retain on the original pointer and the release on the pointer loaded
    // from the alloca. The optimizer will, through the provenance analysis,
    // realize that the two are related, but since we only require KnownSafe in
    // one direction, will match the inner retain on the original pointer with
    // the guard release on the original pointer. This is fixed by ensuring that
    // in the presence of allocas we only unconditionally remove pointers if
    // both our retain and our release are KnownSafe.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand(), DL)) {
        auto I = MyStates.findPtrBottomUpState(
            GetRCIdentityRoot(SI->getValueOperand()));
        if (I != MyStates.bottom_up_ptr_end())
          MultiOwnersSet.insert(I->first);
      }
    }
    break;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {

  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
                     << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "    Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  DEBUG(llvm::dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}

bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(llvm::dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      DEBUG(llvm::dbgs() << "        Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These cannot be uses of the pointers we are tracking.
    return false;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB]  << "\n"
                     << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    DEBUG(dbgs() << "    Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);
  }

  DEBUG(llvm::dbgs() << "\nState Before Checking for CFG Hazards:\n"
                     << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  DEBUG(llvm::dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}
1362 
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
    succ_iterator SE(TI, false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
        SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there are also blocks which we treat
  // as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (BasicBlock &ExitBB : F) {
    BBState &MyStates = BBStates[&ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
    Visited.insert(&ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}

// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {

  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
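  // For instance (a hypothetical diamond CFG A -> {B, C} -> D), the bottom-up
  // pass below visits in reverse postorder of the reverse CFG (D first, A
  // last), while the top-down pass visits in reverse postorder of the forward
  // CFG (A first, D last).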
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
       ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
       I != E; ++I)
    BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
       PostOrder.rbegin(), E = PostOrder.rend();
       I != E; ++I)
    TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);

  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
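/// A sketch of the effect: new retain calls are created at the insertion
/// points recorded for the releases being moved, new release calls at the
/// points recorded for the retains, and the original paired calls are then
/// queued for deletion.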
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}

bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    SmallVectorImpl<Instruction *> &NewRetains,
    SmallVectorImpl<Instruction *> &NewReleases,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool MultipleOwners = false;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
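  // For example (hypothetical IR), a retain whose matching releases lie on
  // two CFG paths pulls both releases into the set; if one of those releases
  // is in turn matched by another retain elsewhere, that retain is pulled in
  // on the next iteration:
  //
  //    %0 = call i8* @objc_retain(i8* %p)
  //    br i1 %c, label %left, label %right
  //  left:
  //    call void @objc_release(i8* %p)
  //    ...
  //  right:
  //    call void @objc_release(i8* %p)
  //    ...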
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (;;) {
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
      Instruction *NewRetain = *NI;
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      MultipleOwners =
        MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
              NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
              NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
      Instruction *NewRelease = *NI;
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    NewReleases.clear();
    if (NewRetains.empty()) break;
  }

  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewCount = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;

    // At this point, we are not going to remove any RR pairs, but we still are
    // able to move RR pairs. If one of our pointers is afflicted with
    // CFGHazards, we cannot perform such code motion so exit early.
    const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
      ReleasesToMove.ReverseInsertPts.size();
    if (CFGHazardAfflicted && WillPerformCodeMotion)
      return false;
  }

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}

/// Identify pairings between the retains and releases, and delete and/or move
/// them.
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  RRInfo RetainsToMove;
  RRInfo ReleasesToMove;
  SmallVector<Instruction *, 4> NewRetains;
  SmallVector<Instruction *, 4> NewReleases;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
            dyn_cast<GlobalVariable>(
              GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    NewRetains.push_back(Retain);
    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, NewRetains, NewReleases, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }

    // Clean up state for next retain.
    NewReleases.clear();
    NewRetains.clear();
    RetainsToMove.clear();
    ReleasesToMove.clear();
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
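  // For example (illustrative IR), the second load below is fully redundant
  // when nothing in between can modify the weak slot, so it is replaced with
  // the first load's result:
  //
  //    %a = call i8* @objc_loadWeak(i8** %wp)
  //    %b = call i8* @objc_loadWeak(i8** %wp)   ; replaced by %a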
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer with the same size etc.,
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
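  // For instance (illustrative IR), the alloca below is used only by weak
  // entry points, so the calls and the alloca itself can all be erased:
  //
  //    %w = alloca i8*
  //    %0 = call i8* @objc_initWeak(i8** %w, i8* %obj)
  //    call void @objc_destroyWeak(i8** %w)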
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases which
/// can be eliminated.
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  // Cleanup.
  MultiOwnersSet.clear();

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
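/// For example (hypothetical IR), the retain below depends only on the call
/// that produces its argument, so there is a safe path from the call to the
/// retain:
/// \code
///    %call = call i8* @something(...)
///    %0 = call i8* @objc_retain(i8* %call)
/// \endcode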
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
                             SmallPtrSetImpl<Instruction *> &DepInsts,
                             SmallPtrSetImpl<const BasicBlock *> &Visited,
                             ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                   DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return false;

  auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return false;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  if (Class != ARCInstKind::CallOrUser && Class != ARCInstKind::Call)
    return false;

  return true;
}

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  SmallPtrSetImpl<Instruction *> &DepInsts,
                                  SmallPtrSetImpl<const BasicBlock *> &Visited,
                                  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
                   BB, Autorelease, DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
                                       SmallPtrSetImpl<const BasicBlock *> &V,
                                       ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
                   BB, Ret, DepInsts, V, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///    %call = call i8* @something(...)
///    %2 = call i8* @objc_retain(i8* %call)
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (BasicBlock &BB: F) {
    // Check for the return before printing it; dereferencing a null Ret in
    // the debug output would crash when the block's terminator is not a ret.
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
    if (!Ret)
      continue;

    DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease = FindPredecessorAutoreleaseWithSafePath(
        Arg, &BB, Ret, DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Autorelease)
      continue;

    CallInst *Retain = FindPredecessorRetainWithSafePath(
        Arg, &BB, Autorelease, DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call.  Note that Retain need not be in BB.
    bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                          DependingInstructions,
                                                          Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!HasSafePathToCall)
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
          << *Autorelease << "\n");
    EraseInstruction(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  llvm::Statistic &NumRetains =
    AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  llvm::Statistic &NumReleases =
    AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Intuitively, objc_retain and others are nocapture; however, in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);

  return false;
}

bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
        "\n");

  PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);

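  // UsedInThisFunction is a bitmask indexed by ARCInstKind, computed by
  // OptimizeIndividualCalls above; e.g. the check below fires only if some
  // weak entry point such as objc_loadWeak actually appears in the function.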
  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {
  PA.clear();
}

/// @}
///