//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
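///
/// For example (an illustrative sketch), a locally provable pairing such as:
///
///   %x = call i8* @objc_retain(i8* %p)
///   ... code that cannot release %p ...
///   call void @objc_release(i8* %p)
///
/// can be removed entirely, since the retain and release cancel out.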
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//

#include "ARCRuntimeEntryPoints.h"
#include "BlotMapVector.h"
#include "DependencyAnalysis.h"
#include "ObjCARC.h"
#include "ProvenanceAnalysis.h"
#include "PtrState.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/ObjCARCAliasAnalysis.h"
#include "llvm/Analysis/ObjCARCAnalysisUtils.h"
#include "llvm/Analysis/ObjCARCInstKind.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"

static cl::opt<unsigned> MaxPtrStates("arc-opt-max-ptr-states",
    cl::Hidden,
    cl::desc("Maximum number of ptr states the optimizer keeps track of"),
    cl::init(4095));

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  // ConstantData (like ConstantPointerNull and UndefValue) is used across
  // modules. It's never a single-use value.
  if (isa<ConstantData>(Arg))
    return nullptr;

  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
          cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object that has multiple uses, but they are
  // all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
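//
// For example (an illustrative sketch of both cases):
//
//   %pool = call i8* @objc_autoreleasePoolPush()
//   ... nothing autoreleased ...
//   call void @objc_autoreleasePoolPop(i8* %pool)   ; whole pair is removable
//
//   call i8* @objc_autorelease(i8* %x)
//   call void @objc_autoreleasePoolPop(i8* %pool)   ; autorelease => release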

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif

namespace {

/// Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount = 0;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount = 0;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;

  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  static const unsigned OverflowOccurredValue;

  BBState() = default;

  using top_down_ptr_iterator = decltype(PerPtrTopDown)::iterator;
  using const_top_down_ptr_iterator = decltype(PerPtrTopDown)::const_iterator;

  top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  const_top_down_ptr_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  const_top_down_ptr_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }
  bool hasTopDownPtrs() const {
    return !PerPtrTopDown.empty();
  }

  unsigned top_down_ptr_list_size() const {
    return std::distance(top_down_ptr_begin(), top_down_ptr_end());
  }

  using bottom_up_ptr_iterator = decltype(PerPtrBottomUp)::iterator;
  using const_bottom_up_ptr_iterator =
      decltype(PerPtrBottomUp)::const_iterator;

  bottom_up_ptr_iterator bottom_up_ptr_begin() {
    return PerPtrBottomUp.begin();
  }
  bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }
  bool hasBottomUpPtrs() const {
    return !PerPtrBottomUp.empty();
  }

  unsigned bottom_up_ptr_list_size() const {
    return std::distance(bottom_up_ptr_begin(), bottom_up_ptr_end());
  }

  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit by
  /// definition.
  void SetAsExit() { BottomUpPathCount = 1; }

  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  TopDownPtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }

  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);

  /// Compute the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  ///
  /// Returns true if overflow occurred. Returns false if overflow did not
  /// occur.
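  ///
  /// For example (illustrative), in a simple diamond CFG each of the two
  /// middle blocks has TopDownPathCount == 1 and BottomUpPathCount == 1,
  /// giving PathCount == 1, while the entry block (TopDownPathCount == 1,
  /// BottomUpPathCount == 2) has PathCount == 2, matching the two
  /// entry-to-exit paths that pass through it.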
  bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
    if (TopDownPathCount == OverflowOccurredValue ||
        BottomUpPathCount == OverflowOccurredValue)
      return true;
    unsigned long long Product =
        (unsigned long long)TopDownPathCount * BottomUpPathCount;
    // Overflow occurred if any of the upper bits of Product are set, or if
    // the lower 32 bits of Product are all set (i.e. Product truncates to
    // OverflowOccurredValue).
    return (Product >> 32) ||
           ((PathCount = Product) == OverflowOccurredValue);
  }

  // Specialized CFG utilities.
  using edge_iterator = SmallVectorImpl<BasicBlock *>::const_iterator;

  edge_iterator pred_begin() const { return Preds.begin(); }
  edge_iterator pred_end() const { return Preds.end(); }
  edge_iterator succ_begin() const { return Succs.begin(); }
  edge_iterator succ_end() const { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;

namespace llvm {

raw_ostream &operator<<(raw_ostream &OS,
                        BBState &BBState) LLVM_ATTRIBUTE_UNUSED;

} // end namespace llvm

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top down pointers when, through
  // this addition, TopDownPathCount becomes OverflowOccurredValue even though
  // "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom up pointers when, through
  // this addition, BottomUpPathCount becomes OverflowOccurredValue even though
  // "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << " TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  OS << " BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    LLVM_DEBUG(dbgs() << " NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << " Ptr: " << *I->first
         << "\n KnownSafe: " << (P.IsKnownSafe() ? "true" : "false")
         << "\n ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases() ? "true" : "false") << "\n"
         << " HasCFGHazards: "
         << (P.IsCFGHazardAfflicted() ? "true" : "false") << "\n"
         << " KnownPositive: "
         << (P.HasKnownPositiveRefCount() ? "true" : "false") << "\n"
         << " Seq: "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}

namespace {

/// The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
  bool Changed;
  ProvenanceAnalysis PA;

  /// A cache of references to runtime entry point constants.
  ARCRuntimeEntryPoints EP;

  /// A cache of MDKinds that can be passed into other functions to propagate
  /// MDKind identifiers.
  ARCMDKindCache MDKindCache;

  /// A flag indicating whether this optimization pass should run.
  bool Run;

  /// A flag indicating whether the optimization that removes or moves
  /// retain/release pairs should be performed.
  bool DisableRetainReleasePairing = false;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  /// Optimize an individual call, optionally passing the
  /// GetArgRCIdentityRoot if it has already been computed.
  void OptimizeIndividualCallImpl(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, ARCInstKind Class, const Value *Arg);

  /// Try to optimize an AutoreleaseRV with a RetainRV or ClaimRV. If the
  /// optimization occurs, returns true to indicate that the caller should
  /// assume the instructions are dead.
  bool OptimizeInlinedAutoreleaseRVCall(
      Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
      Instruction *Inst, const Value *&Arg, ARCInstKind Class,
      Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                BlotMapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     BlotMapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
             BlotMapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 BlotMapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

  bool
  PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases, Module *M,
                           Instruction *Retain,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                           Value *Arg, bool KnownSafe,
                           bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            BlotMapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases, Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  void releaseMemory() override;

public:
  static char ID;

  ObjCARCOpt() : FunctionPass(ID) {
    initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
  }
};

} // end anonymous namespace

char ObjCARCOpt::ID = 0;

INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAAWrapperPass)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAAWrapperPass>();
  AU.addRequired<AAResultsWrapperPass>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value.
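///
/// For example (an illustrative sketch; the IR names are hypothetical):
///
///   %v = call i8* @foo()
///   %y = load i8*, i8** @g      ; not a no-op between the call and RetainRV
///   %r = call i8* @objc_retainAutoreleasedReturnValue(i8* %v)
/// =>
///   %r = call i8* @objc_retain(i8* %v)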
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I(Call);
      ++I;
      while (IsNoopInstruction(&*I))
        ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(&*I))
          ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                       "objc_retain since the operand is not a return value.\n"
                       "Old = "
                    << *RetainRV << "\n");

  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  LLVM_DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}

bool ObjCARCOpt::OptimizeInlinedAutoreleaseRVCall(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, const Value *&Arg, ARCInstKind Class,
    Instruction *AutoreleaseRV, const Value *&AutoreleaseRVArg) {
  // Must be in the same basic block.
  assert(Inst->getParent() == AutoreleaseRV->getParent());

  // Must operate on the same root.
  Arg = GetArgRCIdentityRoot(Inst);
  AutoreleaseRVArg = GetArgRCIdentityRoot(AutoreleaseRV);
  if (Arg != AutoreleaseRVArg) {
    // If there isn't an exact match, check if we have equivalent PHIs.
    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      return false;

    SmallVector<const Value *, 4> ArgUsers;
    getEquivalentPHIs(*PN, ArgUsers);
    if (llvm::find(ArgUsers, AutoreleaseRVArg) == ArgUsers.end())
      return false;
  }

  // Okay, this is a match. Merge them.
  ++NumPeeps;
  LLVM_DEBUG(dbgs() << "Found inlined objc_autoreleaseReturnValue '"
                    << *AutoreleaseRV << "' paired with '" << *Inst << "'\n");

  // Delete the RV pair, starting with the AutoreleaseRV.
  AutoreleaseRV->replaceAllUsesWith(
      cast<CallInst>(AutoreleaseRV)->getArgOperand(0));
  EraseInstruction(AutoreleaseRV);
  if (Class == ARCInstKind::RetainRV) {
    // AutoreleaseRV and RetainRV cancel out. Delete the RetainRV.
    Inst->replaceAllUsesWith(cast<CallInst>(Inst)->getArgOperand(0));
    EraseInstruction(Inst);
    return true;
  }

  // ClaimRV is a frontend peephole for RetainRV + Release. Since the
  // AutoreleaseRV and RetainRV cancel out, replace the ClaimRV with a Release.
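  //
  // Illustrative sketch of the net effect (hypothetical IR names):
  //
  //   %v = call i8* @objc_autoreleaseReturnValue(i8* %x)
  //   %u = call i8* @objc_unsafeClaimAutoreleasedReturnValue(i8* %v)
  // =>
  //   call void @objc_release(i8* %x)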
  assert(Class == ARCInstKind::ClaimRV);
  Value *CallArg = cast<CallInst>(Inst)->getArgOperand(0);
  CallInst *Release = CallInst::Create(
      EP.get(ARCRuntimeEntryPointKind::Release), CallArg, "", Inst);
  assert(IsAlwaysTail(ARCInstKind::ClaimRV) &&
         "Expected ClaimRV to be safe to tail call");
  Release->setTailCall();
  Inst->replaceAllUsesWith(CallArg);
  EraseInstruction(Inst);

  // Run the normal optimizations on Release.
  OptimizeIndividualCallImpl(F, BlockColors, Release, ARCInstKind::Release,
                             Arg);
  return true;
}

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
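///
/// For example (an illustrative sketch with hypothetical names):
///
///   %v = call i8* @objc_autoreleaseReturnValue(i8* %x)
///   store i8* %v, i8** %p       ; %v is never returned
/// =>
///   %v = call i8* @objc_autorelease(i8* %x)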
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);

  // If the argument is ConstantPointerNull or UndefValue, its other users
  // aren't actually interesting to look at.
  if (isa<ConstantData>(Ptr))
    return;

  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);

  // Add PHIs that are equivalent to Ptr to Users.
  if (const PHINode *PN = dyn_cast<PHINode>(Ptr))
    getEquivalentPHIs(*PN, Users);

  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  LLVM_DEBUG(
      dbgs() << "Transforming objc_autoreleaseReturnValue => "
                "objc_autorelease since its operand is not used as a return "
                "value.\n"
                "Old = "
             << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Function *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  LLVM_DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}

namespace {
Instruction *
CloneCallInstForBB(CallInst &CI, BasicBlock &BB,
                   const DenseMap<BasicBlock *, ColorVector> &BlockColors) {
  SmallVector<OperandBundleDef, 1> OpBundles;
  for (unsigned I = 0, E = CI.getNumOperandBundles(); I != E; ++I) {
    auto Bundle = CI.getOperandBundleAt(I);
    // Funclets will be reassociated in the future.
    if (Bundle.getTagID() == LLVMContext::OB_funclet)
      continue;
    OpBundles.emplace_back(Bundle);
  }

  if (!BlockColors.empty()) {
    const ColorVector &CV = BlockColors.find(&BB)->second;
    assert(CV.size() == 1 && "non-unique color for block!");
    Instruction *EHPad = CV.front()->getFirstNonPHI();
    if (EHPad->isEHPad())
      OpBundles.emplace_back("funclet", EHPad);
  }

  return CallInst::Create(&CI, OpBundles);
}
}

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  DenseMap<BasicBlock *, ColorVector> BlockColors;
  if (F.hasPersonalityFn() &&
      isScopedEHPersonality(classifyEHPersonality(F.getPersonalityFn())))
    BlockColors = colorEHFunclets(F);

  // Store any delayed AutoreleaseRV intrinsics, so they can be easily paired
  // with RetainRV and ClaimRV.
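  //
  // For example (illustrative), after inlining we may see
  //
  //   %v = call i8* @objc_autoreleaseReturnValue(i8* %x)        ; from callee
  //   %r = call i8* @objc_retainAutoreleasedReturnValue(i8* %v) ; from caller
  //
  // and the two calls cancel out entirely.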
  Instruction *DelayedAutoreleaseRV = nullptr;
  const Value *DelayedAutoreleaseRVArg = nullptr;
  auto setDelayedAutoreleaseRV = [&](Instruction *AutoreleaseRV) {
    assert(!DelayedAutoreleaseRV || !AutoreleaseRV);
    DelayedAutoreleaseRV = AutoreleaseRV;
    DelayedAutoreleaseRVArg = nullptr;
  };
  auto optimizeDelayedAutoreleaseRV = [&]() {
    if (!DelayedAutoreleaseRV)
      return;
    OptimizeIndividualCallImpl(F, BlockColors, DelayedAutoreleaseRV,
                               ARCInstKind::AutoreleaseRV,
                               DelayedAutoreleaseRVArg);
    setDelayedAutoreleaseRV(nullptr);
  };
  auto shouldDelayAutoreleaseRV = [&](Instruction *NonARCInst) {
    // Nothing to delay, but we may as well skip the logic below.
    if (!DelayedAutoreleaseRV)
      return true;

    // If we hit the end of the basic block we're not going to find an RV-pair.
    // Stop delaying.
    if (NonARCInst->isTerminator())
      return false;

    // Given the frontend rules for emitting AutoreleaseRV, RetainRV, and
    // ClaimRV, it's probably safe to skip over even opaque function calls
    // here since OptimizeInlinedAutoreleaseRVCall will confirm that they
    // have the same RCIdentityRoot. However, what really matters is
    // skipping instructions or intrinsics that the inliner could leave behind;
    // be conservative for now and don't skip over opaque calls, which could
    // potentially include other ARC calls.
    auto *CB = dyn_cast<CallBase>(NonARCInst);
    if (!CB)
      return true;
    return CB->getIntrinsicID() != Intrinsic::not_intrinsic;
  };

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    // Skip this loop if this instruction isn't itself an ARC intrinsic.
    const Value *Arg = nullptr;
    switch (Class) {
    default:
      optimizeDelayedAutoreleaseRV();
      break;
    case ARCInstKind::CallOrUser:
    case ARCInstKind::User:
    case ARCInstKind::None:
      // This is a non-ARC instruction. If we're delaying an AutoreleaseRV,
      // check if it's safe to skip over it; if not, optimize the AutoreleaseRV
      // now.
      if (!shouldDelayAutoreleaseRV(Inst))
        optimizeDelayedAutoreleaseRV();
      continue;
    case ARCInstKind::AutoreleaseRV:
      optimizeDelayedAutoreleaseRV();
      setDelayedAutoreleaseRV(Inst);
      continue;
    case ARCInstKind::RetainRV:
    case ARCInstKind::ClaimRV:
      if (DelayedAutoreleaseRV) {
        // We have a potential RV pair. Check if they cancel out.
        if (OptimizeInlinedAutoreleaseRVCall(F, BlockColors, Inst, Arg, Class,
                                             DelayedAutoreleaseRV,
                                             DelayedAutoreleaseRVArg)) {
          setDelayedAutoreleaseRV(nullptr);
          continue;
        }
        optimizeDelayedAutoreleaseRV();
      }
      break;
    }

    OptimizeIndividualCallImpl(F, BlockColors, Inst, Class, Arg);
  }

  // Catch the final delayed AutoreleaseRV.
  optimizeDelayedAutoreleaseRV();
}

void ObjCARCOpt::OptimizeIndividualCallImpl(
    Function &F, DenseMap<BasicBlock *, ColorVector> &BlockColors,
    Instruction *Inst, ARCInstKind Class, const Value *Arg) {
  LLVM_DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

  // Some of the ARC calls can be deleted if their arguments are global
  // variables that are inert in ARC.
  if (IsNoopOnGlobal(Class)) {
    Value *Opnd = Inst->getOperand(0);
    if (auto *GV = dyn_cast<GlobalVariable>(Opnd->stripPointerCasts()))
      if (GV->hasAttribute("objc_arc_inert")) {
        if (!Inst->getType()->isVoidTy())
          Inst->replaceAllUsesWith(Opnd);
        Inst->eraseFromParent();
        return;
      }
  }

  switch (Class) {
  default:
    break;

  // Delete no-op casts. These function calls have special semantics, but
  // the semantics are entirely implemented via lowering in the front-end,
  // so by the time they reach the optimizer, they are just no-op calls
  // which return their argument.
  //
  // There are gray areas here, as the ability to cast reference-counted
  // pointers to raw void* and back allows code to break ARC assumptions,
  // however these are currently considered to be unimportant.
  case ARCInstKind::NoopCast:
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
    EraseInstruction(Inst);
    return;

  // If the pointer-to-weak-pointer is null, it's undefined behavior.
  case ARCInstKind::StoreWeak:
  case ARCInstKind::LoadWeak:
  case ARCInstKind::LoadWeakRetained:
  case ARCInstKind::InitWeak:
  case ARCInstKind::DestroyWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
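      // Make the undefined behavior explicit: store undef through the null
      // weak-pointer slot and fold the call's result to undef, so later
      // passes can exploit it.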
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);
      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");
      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::CopyWeak:
  case ARCInstKind::MoveWeak: {
    CallInst *CI = cast<CallInst>(Inst);
    if (IsNullOrUndef(CI->getArgOperand(0)) ||
        IsNullOrUndef(CI->getArgOperand(1))) {
      Changed = true;
      Type *Ty = CI->getArgOperand(0)->getType();
      new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                    Constant::getNullValue(Ty), CI);

      Value *NewValue = UndefValue::get(CI->getType());
      LLVM_DEBUG(
          dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                    "\nOld = "
                 << *CI << "\nNew = " << *NewValue << "\n");

      CI->replaceAllUsesWith(NewValue);
      CI->eraseFromParent();
      return;
    }
    break;
  }
  case ARCInstKind::RetainRV:
    if (OptimizeRetainRVCall(F, Inst))
      return;
    break;
  case ARCInstKind::AutoreleaseRV:
    OptimizeAutoreleaseRVCall(F, Inst, Class);
    break;
  }

  // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
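  // (Autoreleasing an object that no one else can observe merely defers its
  // release to the enclosing pool pop; releasing it immediately is
  // equivalent and cheaper.)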
  if (IsAutorelease(Class) && Inst->use_empty()) {
    CallInst *Call = cast<CallInst>(Inst);
    const Value *Arg = Call->getArgOperand(0);
    Arg = FindSingleUseIdentifiedObject(Arg);
    if (Arg) {
      Changed = true;
      ++NumAutoreleases;

      // Create the declaration lazily.
      LLVMContext &C = Inst->getContext();

      Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
      CallInst *NewCall =
          CallInst::Create(Decl, Call->getArgOperand(0), "", Call);
      NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                           MDNode::get(C, None));

      LLVM_DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                           "since x is otherwise unused.\nOld: "
                        << *Call << "\nNew: " << *NewCall << "\n");

      EraseInstruction(Call);
      Inst = NewCall;
      Class = ARCInstKind::Release;
    }
  }

  // For functions which can never be passed stack arguments, add
  // a tail keyword.
  if (IsAlwaysTail(Class) && !cast<CallInst>(Inst)->isNoTailCall()) {
    Changed = true;
    LLVM_DEBUG(
        dbgs() << "Adding tail keyword to function since it can never be "
                  "passed stack args: "
               << *Inst << "\n");
    cast<CallInst>(Inst)->setTailCall();
  }

  // Ensure that functions that can never have a "tail" keyword due to the
  // semantics of ARC truly do not do so.
  if (IsNeverTail(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setTailCall(false);
  }

  // Set nounwind as needed.
  if (IsNoThrow(Class)) {
    Changed = true;
    LLVM_DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                      << "\n");
    cast<CallInst>(Inst)->setDoesNotThrow();
  }

  // Note: This catches instructions unrelated to ARC.
  if (!IsNoopOnNull(Class)) {
    UsedInThisFunction |= 1 << unsigned(Class);
    return;
  }

  // If we haven't already looked up the root, look it up now.
  if (!Arg)
    Arg = GetArgRCIdentityRoot(Inst);

  // ARC calls with null are no-ops. Delete them.
  if (IsNullOrUndef(Arg)) {
    Changed = true;
    ++NumNoops;
    LLVM_DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                      << "\n");
    EraseInstruction(Inst);
    return;
  }

  // Keep track of which of retain, release, autorelease, and retain_block
  // are actually present in this function.
  UsedInThisFunction |= 1 << unsigned(Class);

  // If Arg is a PHI, and one or more incoming values to the
  // PHI are null, and the call is control-equivalent to the PHI, and there
  // are no relevant side effects between the PHI and the call, and the call
  // is not a release that doesn't have the clang.imprecise_release tag, the
  // call could be pushed up to just those paths with non-null incoming
  // values. For now, don't bother splitting critical edges for this.
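  //
  // Illustrative sketch (hypothetical IR): given
  //
  //   %phi = phi i8* [ null, %bb1 ], [ %obj, %bb2 ]
  //   call void @objc_release(i8* %phi), !clang.imprecise_release !0
  //
  // the release can be cloned into %bb2 alone, since releasing null along
  // the %bb1 path is a no-op.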
  if (Class == ARCInstKind::Release &&
      !Inst->getMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease)))
    return;

  SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
  Worklist.push_back(std::make_pair(Inst, Arg));
  do {
    std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
    Inst = Pair.first;
    Arg = Pair.second;

    const PHINode *PN = dyn_cast<PHINode>(Arg);
    if (!PN)
      continue;

    // Determine if the PHI has any null operands, or any incoming
    // critical edges.
    bool HasNull = false;
    bool HasCriticalEdges = false;
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        HasNull = true;
      else if (PN->getIncomingBlock(i)->getTerminator()->getNumSuccessors() !=
               1) {
        HasCriticalEdges = true;
        break;
      }
    }
    // If we have null operands and no critical edges, optimize.
    if (HasCriticalEdges)
      continue;
    if (!HasNull)
      continue;

    SmallPtrSet<Instruction *, 4> DependingInstructions;
    SmallPtrSet<const BasicBlock *, 4> Visited;

    // Check that there is nothing that cares about the reference
    // count between the call and the phi.
    switch (Class) {
    case ARCInstKind::Retain:
    case ARCInstKind::RetainBlock:
      // These can always be moved up.
      break;
    case ARCInstKind::Release:
      // These can't be moved across things that care about the retain
      // count.
      FindDependencies(NeedsPositiveRetainCount, Arg, Inst->getParent(), Inst,
                       DependingInstructions, Visited, PA);
      break;
    case ARCInstKind::Autorelease:
      // These can't be moved across autorelease pool scope boundaries.
      FindDependencies(AutoreleasePoolBoundary, Arg, Inst->getParent(), Inst,
                       DependingInstructions, Visited, PA);
      break;
    case ARCInstKind::ClaimRV:
    case ARCInstKind::RetainRV:
    case ARCInstKind::AutoreleaseRV:
      // Don't move these; the RV optimization depends on the autoreleaseRV
      // being tail called, and the retainRV being immediately after a call
      // (which might still happen if we get lucky with codegen layout, but
      // it's not worth taking the chance).
      continue;
    default:
      llvm_unreachable("Invalid dependence flavor");
    }

    if (DependingInstructions.size() != 1)
      continue;
    if (*DependingInstructions.begin() != PN)
      continue;

    Changed = true;
    ++NumPartialNoops;
    // Clone the call into each predecessor that has a non-null value.
    CallInst *CInst = cast<CallInst>(Inst);
    Type *ParamTy = CInst->getArgOperand(0)->getType();
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *Incoming = GetRCIdentityRoot(PN->getIncomingValue(i));
      if (IsNullOrUndef(Incoming))
        continue;
      Value *Op = PN->getIncomingValue(i);
      Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
      CallInst *Clone = cast<CallInst>(
          CloneCallInstForBB(*CInst, *InsertPos->getParent(), BlockColors));
      if (Op->getType() != ParamTy)
        Op = new BitCastInst(Op, ParamTy, "", InsertPos);
      Clone->setArgOperand(0, Op);
      Clone->insertBefore(InsertPos);

      LLVM_DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                           "And inserting clone at "
                        << *InsertPos << "\n");
      Worklist.push_back(std::make_pair(Clone, Incoming));
    }
    // Erase the original call.
    LLVM_DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
    EraseInstruction(CInst);
  } while (!Worklist.empty());
}

/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
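///
/// For example (illustrative), hoisting a release across a loop backedge
/// would execute it once per iteration instead of once overall, so any
/// sequence whose successor states disagree must either be cleared or be
/// marked CFG-hazard-afflicted to block code motion.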
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future, this
    // code is updated to handle them.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    for (const BasicBlock *Succ : successors(BB)) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(Succ);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If, bottom up, the pointer is in an S_None state, clear the sequence
      // progress, since the sequence in the bottom up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard check.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch (S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease:
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state foregoing the fact that we are known
      // safe, stop code motion. This is because whether or not it is safe to
      // remove RR pairs via KnownSafe is an orthogonal concept to whether we
      // are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}

bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}

bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = &*std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    LLVM_DEBUG(dbgs() << " Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.bottom_up_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  LLVM_DEBUG(dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}

bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  LLVM_DEBUG(dbgs() << " Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break since
    // a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      LLVM_DEBUG(dbgs() << " Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return false;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant; they cannot be uses of the pointers we track.
1498 return false;
1499 default:
1500 break;
1501 }
1502
1503 // Consider any other possible effects of this instruction on each
1504 // pointer being tracked.
1505 for (auto MI = MyStates.top_down_ptr_begin(),
1506 ME = MyStates.top_down_ptr_end();
1507 MI != ME; ++MI) {
1508 const Value *Ptr = MI->first;
1509 if (Ptr == Arg)
1510 continue; // Handled above.
1511 TopDownPtrState &S = MI->second;
1512 if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
1513 continue;
1514
1515 S.HandlePotentialUse(Inst, Ptr, PA, Class);
1516 }
1517
1518 return NestingDetected;
1519 }
1520
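/// Perform the top-down half of the dataflow for a block: merge the states of
/// BB's predecessors, then visit each instruction in order, matching releases
/// against the retains tracked above them. As an illustrative sketch
/// (hypothetical IR, not taken from any particular test case), the pairing
/// recorded here looks like:
/// \code
///   %0 = call i8* @objc_retain(i8* %x)  ; begins top-down tracking of %x
///   call void @use(i8* %x)              ; a potential use; tracking continues
///   call void @objc_release(i8* %x)     ; matched against the retain above
/// \endcode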
bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  LLVM_DEBUG(dbgs() << "Before:\n"
                    << BBStates[BB] << "\n"
                    << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (Instruction &Inst : *BB) {
    LLVM_DEBUG(dbgs() << " Visiting " << Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(&Inst, Releases, MyStates);

    // Bail out if the number of pointers being tracked becomes too large so
    // that this pass can complete in a reasonable amount of time.
    if (MyStates.top_down_ptr_list_size() > MaxPtrStates) {
      DisableRetainReleasePairing = true;
      return false;
    }
  }

  LLVM_DEBUG(dbgs() << "\nState Before Checking for CFG Hazards:\n"
                    << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  LLVM_DEBUG(dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}

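/// Compute a postorder and a reverse-CFG postorder of F, recording pred/succ
/// edges in BBStates along the way. As a small worked example (hypothetical
/// block names) for the diamond CFG
/// \code
///          entry
///          /   \
///       then   else
///          \   /
///          exit
/// \endcode
/// one valid PostOrder is {exit, then, else, entry} and one valid
/// ReverseCFGPostOrder is {entry, then, else, exit}; Visit() walks each list
/// in reverse to obtain reverse-postorder traversals of the CFG and of the
/// reverse CFG. The exact orders depend on successor iteration order.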
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  Instruction *EntryTI = EntryBB->getTerminator();
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    succ_iterator SE(CurrBB->getTerminator(), false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        SuccStack.push_back(
            std::make_pair(SuccBB, succ_iterator(SuccBB->getTerminator())));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there are also blocks which we treat
  // as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (BasicBlock &ExitBB : F) {
    BBState &MyStates = BBStates[&ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(&ExitBB, MyStates.pred_begin()));
    Visited.insert(&ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}

// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {
  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the
  // ReversePostOrderTraversal class here because we want the reverse-CFG
  // postorder to consider each function exit point, and we want to ignore
  // selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(ReverseCFGPostOrder)) {
    BottomUpNestingDetected |= VisitBottomUp(BB, BBStates, Retains);
    if (DisableRetainReleasePairing)
      return false;
  }

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (BasicBlock *BB : llvm::reverse(PostOrder)) {
    TopDownNestingDetected |= VisitTopDown(BB, BBStates, Releases);
    if (DisableRetainReleasePairing)
      return false;
  }

  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
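///
/// New retain calls are created at ReleasesToMove's insertion points and new
/// release calls at RetainsToMove's, after which the original calls are queued
/// for deletion. As a hedged sketch (hypothetical IR), a pair guarding a path
/// on which %x is never used,
/// \code
///   %0 = call i8* @objc_retain(i8* %x)
///   br i1 %c, label %use_path, label %skip_path
/// \endcode
/// can be rewritten so the retain/release pair exists only along %use_path.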
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  LLVM_DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    LLVM_DEBUG(dbgs() << "Inserting new Retain: " << *Call
                      << "\n"
                         "At insertion point: "
                      << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Function *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    LLVM_DEBUG(dbgs() << "Inserting new Release: " << *Call
                      << "\n"
                         "At insertion point: "
                      << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    LLVM_DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    LLVM_DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}

bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    Instruction *Retain,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the bottom-up-collected retains and the
  // top-down-collected releases to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (SmallVector<Instruction *, 4> NewRetains{Retain};;) {
    SmallVector<Instruction *, 4> NewReleases;
    for (Instruction *NewRetain : NewRetains) {
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      CFGHazardAfflicted |= NewRetainRRI.CFGHazardAfflicted;
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
                NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
                NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (Instruction *NewRelease : NewReleases) {
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    if (NewRetains.empty()) break;
  }

  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewCount = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;

    // At this point, we are not going to remove any RR pairs, but we still are
    // able to move RR pairs. If one of our pointers is afflicted with
    // CFGHazards, we cannot perform such code motion, so exit early.
    const bool WillPerformCodeMotion =
        !RetainsToMove.ReverseInsertPts.empty() ||
        !ReleasesToMove.ReverseInsertPts.empty();
    if (CFGHazardAfflicted && WillPerformCodeMotion)
      return false;
  }

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}

/// Identify pairings between the retains and releases, and delete and/or move
/// them.
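///
/// As a sketch of the payoff (hypothetical IR), a known-safe pair with
/// nothing in between that can decrement the reference count,
/// \code
///   %0 = call i8* @objc_retain(i8* %x)
///   call void @use_nothrow(i8* %x)
///   call void @objc_release(i8* %x)
/// \endcode
/// is deleted outright, leaving only the call to @use_nothrow.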
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    LLVM_DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
              dyn_cast<GlobalVariable>(
                  GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;

    // Connect the dots between the bottom-up-collected retains and the
    // top-down-collected releases to form sets of related calls.
    RRInfo RetainsToMove, ReleasesToMove;

    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, Retain, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
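///
/// As an illustrative sketch (hypothetical IR), the redundant-load elimination
/// below rewrites
/// \code
///   %a = call i8* @objc_loadWeak(i8** %p)
///   %b = call i8* @objc_loadWeak(i8** %p)  ; no intervening weak writes
/// \endcode
/// so that the second load is deleted and %b's uses are replaced with %a.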
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE (redundant load elimination) and S2L
  // (store-to-load forwarding) optimizations. We can't use memdep itself
  // because it uses AliasAnalysis and we need to do provenance queries
  // instead.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = &*Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.,
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Function *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case MayAlias:
        case PartialAlias:
          goto clobbered;
        case NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}

/// Identify program paths which execute sequences of retains and releases
/// that can be eliminated.
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  if (DisableRetainReleasePairing)
    return false;

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  return AnyPairsCompletelyEliminated && NestingDetected;
}

/// Check if there is an earlier call on which Retain depends, with nothing in
/// between the call and the Retain that can affect the reference count of
/// their shared pointer argument. Note that Retain need not be in BB.
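///
/// As a hedged sketch (hypothetical IR), the shape being checked for is:
/// \code
///   %call = call i8* @something(...)
///   ; nothing in between may affect %call's reference count
///   %0 = call i8* @objc_retain(i8* %call)
/// \endcode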
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
                             SmallPtrSetImpl<Instruction *> &DepInsts,
                             SmallPtrSetImpl<const BasicBlock *> &Visited,
                             ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                   DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return false;

  auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return false;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  return Class == ARCInstKind::CallOrUser || Class == ARCInstKind::Call;
}

/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  SmallPtrSetImpl<Instruction *> &DepInsts,
                                  SmallPtrSetImpl<const BasicBlock *> &Visited,
                                  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
                   BB, Autorelease, DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}

/// Look for an ``autorelease'' instruction dependent on Arg such that there
/// are no instructions dependent on Arg that need a positive ref count in
/// between the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
                                       SmallPtrSetImpl<const BasicBlock *> &V,
                                       ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
                   BB, Ret, DepInsts, V, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}

/// Look for this pattern:
/// \code
///   %call = call i8* @something(...)
///   %2 = call i8* @objc_retain(i8* %call)
///   %3 = call i8* @objc_autorelease(i8* %2)
///   ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  LLVM_DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (BasicBlock &BB : F) {
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB.back());
    if (!Ret)
      continue;

    LLVM_DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));

    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease = FindPredecessorAutoreleaseWithSafePath(
        Arg, &BB, Ret, DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Autorelease)
      continue;

    CallInst *Retain = FindPredecessorRetainWithSafePath(
        Arg, Autorelease->getParent(), Autorelease, DependingInstructions,
        Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Retain)
      continue;

    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                          DependingInstructions,
                                                          Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!HasSafePathToCall)
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    LLVM_DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: " << *Autorelease
                      << "\n");
    EraseInstruction(Retain);
    EraseInstruction(Autorelease);
  }
}

#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  Statistic &NumRetains =
      AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  Statistic &NumReleases =
      AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif

bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Intuitively, objc_retain and others are nocapture; however, in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);

  return false;
}

bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  LLVM_DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName()
                    << " >>>"
                       "\n");

  PA.setAA(&getAnalysis<AAResultsWrapperPass>().getAAResults());

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif

  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);

  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  LLVM_DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {
  PA.clear();
}

/// @}
///