//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to perform inlining without
// missing any calls and while keeping the call graph up to date. The
// decisions about which call sites are profitable to inline are made
// elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

// This weirdly named statistic tracks the number of times that, when
// attempting to inline a function A into B, we analyze the callers of B to
// see whether inlining B into them would be more profitable, which would
// block this inlining step.
STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");

static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
        cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int>
HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
              cl::desc("Threshold for inlining functions with inline hint"));

// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as
// BPI and BFI.
static cl::opt<int>
ColdThreshold("inlinecold-threshold", cl::Hidden, cl::init(225),
              cl::desc("Threshold for inlining functions with cold attribute"));

// Threshold to use when optsize is specified (and there is no
// -inline-threshold).
const int OptSizeThreshold = 75;

Inliner::Inliner(char &ID)
  : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {}

Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime)
  : CallGraphSCCPass(ID), InlineThreshold(InlineLimit.getNumOccurrences() > 0 ?
                                          InlineLimit : Threshold),
    InsertLifetime(InsertLifetime) {}

/// getAnalysisUsage - For this class, we declare that we require and preserve
/// the call graph. If the derived class implements this method, it should
/// always explicitly call the implementation here.
void Inliner::getAnalysisUsage(AnalysisUsage &AU) const {
  CallGraphSCCPass::getAnalysisUsage(AU);
}

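/// InlinedArrayAllocasTy - For each array type, the list of allocas of that
/// type already present in the caller (because of earlier inlines) that later
/// inlined allocas may be merged into; see InlineCallIfPossible below.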
typedef DenseMap<ArrayType*, std::vector<AllocaInst*> >
InlinedArrayAllocasTy;

/// \brief If the inlined function had a higher stack protection level than the
/// calling function, then bump up the caller's stack protection level.
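///
/// The levels form an ordering, ssp < sspstrong < sspreq, which the chain of
/// checks below implements. For example, if the callee is sspstrong and the
/// caller is plain ssp, the caller becomes sspstrong; an sspreq caller is
/// never downgraded.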
static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
  // If upgrading the SSP attribute, clear out the old SSP Attributes first.
  // Having multiple SSP attributes doesn't actually hurt, but it adds useless
  // clutter to the IR.
  AttrBuilder B;
  B.addAttribute(Attribute::StackProtect)
    .addAttribute(Attribute::StackProtectStrong);
  AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(),
                                              AttributeSet::FunctionIndex,
                                              B);
  AttributeSet CallerAttr = Caller->getAttributes(),
               CalleeAttr = Callee->getAttributes();

  if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex,
                              Attribute::StackProtectReq)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::StackProtectReq);
  } else if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackProtectStrong) &&
             !CallerAttr.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::StackProtectReq)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::StackProtectStrong);
  } else if (CalleeAttr.hasAttribute(AttributeSet::FunctionIndex,
                                     Attribute::StackProtect) &&
             !CallerAttr.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::StackProtectReq) &&
             !CallerAttr.hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::StackProtectStrong))
    Caller->addFnAttr(Attribute::StackProtect);
}

/// InlineCallIfPossible - If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(CallSite CS, InlineFunctionInfo &IFI,
                                 InlinedArrayAllocasTy &InlinedArrayAllocas,
                                 int InlineHistory, bool InsertLifetime,
                                 const DataLayout *DL) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  if (!InlineFunction(CS, IFI, InsertLifetime))
    return false;

  AdjustCallerSSPLevel(Caller, Callee);

  // Look at all of the allocas that we inlined through this call site. If we
  // have already inlined other allocas through other calls into this function,
  // then we know that they have disjoint lifetimes and that we can merge them.
  //
  // There are many heuristics possible for merging these allocas, and the
  // different options have different tradeoffs. One thing that we *really*
  // don't want to hurt is SRoA: once inlining happens, often allocas are no
  // longer address taken and so they can be promoted.
  //
  // Our "solution" for that is to only merge allocas whose outermost type is an
  // array type. These are usually not promoted because someone is using a
  // variable index into them. These are also often the most important ones to
  // merge.
  //
  // A better solution would be to have real memory lifetime markers in the IR
  // and not have the inliner do any merging of allocas at all. This would
  // allow the backend to do proper stack slot coloring of all allocas that
  // *actually make it to the backend*, which is really what we want.
  //
  // Because we don't have this information, we do this simple and useful hack.
  //
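  // As an illustrative example of the merging below: if an earlier inline
  // into this caller introduced
  //   %a = alloca [4 x i32]
  // and this inline later introduces another
  //   %b = alloca [4 x i32]
  // with a disjoint lifetime, then %b is RAUW'd to %a and erased, leaving a
  // single stack slot.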
  SmallPtrSet<AllocaInst*, 16> UsedAllocas;

  // When processing our SCC, check to see if CS was inlined from some other
  // call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a call site (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas map, but this isn't likely to be a significant win.
  if (InlineHistory != -1)  // Only do merging for top-level call sites in SCC.
    return true;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
       AllocaNo != e; ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one. Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (unsigned i = 0, e = AllocasForType.size(); i != e; ++i) {
      AllocaInst *AvailableAlloca = AllocasForType[i];

      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();
      // If we don't have data layout information, and only one alloca is using
      // the target default, then we can't safely merge them because we can't
      // pick the greater alignment.
      if (!DL && (!Align1 || !Align2) && Align1 != Align2)
        continue;

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca))
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      DEBUG(dbgs() << "    ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: "
                   << *AvailableAlloca << '\n');

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          assert(DL && "DataLayout required to compare default alignments");
          unsigned TypeAlign = DL->getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(AI->getAlignment());
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }

  return true;
}

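/// getInlineThreshold - Compute the threshold to use for the given call site.
/// We start from -inline-threshold (or the pass's default) and let function
/// attributes adjust it, in this order: optsize may lower it, inlinehint may
/// raise it, and cold may lower it again. For example, with only default
/// values in play, a callee marked both inlinehint and cold ends up with the
/// cold threshold (225), because the cold check runs last.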
unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int thres = InlineThreshold; // -inline-threshold or else selected by
                               // overall opt level

  // If -inline-threshold is not given, listen to the optsize attribute when it
  // would decrease the threshold.
  Function *Caller = CS.getCaller();
  bool OptSize = Caller && !Caller->isDeclaration() &&
    Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::OptimizeForSize);
  if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
      OptSizeThreshold < thres)
    thres = OptSizeThreshold;

  // Listen to the inlinehint attribute when it would increase the threshold
  // and the caller does not need to minimize its size.
  Function *Callee = CS.getCalledFunction();
  bool InlineHint = Callee && !Callee->isDeclaration() &&
    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::InlineHint);
  if (InlineHint && HintThreshold > thres &&
      !Caller->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                            Attribute::MinSize))
    thres = HintThreshold;

  // Listen to the cold attribute when it would decrease the threshold.
  bool ColdCallee = Callee && !Callee->isDeclaration() &&
    Callee->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::Cold);
  // An explicit -inline-threshold overrides the default cold threshold: if we
  // have -inline-threshold but no -inlinecold-threshold, do not use the
  // default cold threshold even if it is smaller.
  if ((InlineLimit.getNumOccurrences() == 0 ||
       ColdThreshold.getNumOccurrences() > 0) && ColdCallee &&
      ColdThreshold < thres)
    thres = ColdThreshold;

  return thres;
}

static void emitAnalysis(CallSite CS, const Twine &Msg) {
  Function *Caller = CS.getCaller();
  LLVMContext &Ctx = Caller->getContext();
  DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
  emitOptimizationRemarkAnalysis(Ctx, DEBUG_TYPE, *Caller, DLoc, Msg);
}

/// shouldInline - Return true if the inliner should attempt to inline
/// at the given CallSite.
bool Inliner::shouldInline(CallSite CS) {
  InlineCost IC = getInlineCost(CS);

  if (IC.isAlways()) {
    DEBUG(dbgs() << "    Inlining: cost=always"
                 << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName()) +
                     " should always be inlined (cost=always)");
    return true;
  }

  if (IC.isNever()) {
    DEBUG(dbgs() << "    NOT Inlining: cost=never"
                 << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
                           " should never be inlined (cost=never)"));
    return false;
  }

  Function *Caller = CS.getCaller();
  if (!IC) {
    DEBUG(dbgs() << "    NOT Inlining: cost=" << IC.getCost()
                 << ", thres=" << (IC.getCostDelta() + IC.getCost())
                 << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
                           " too costly to inline (cost=") +
                     Twine(IC.getCost()) + ", threshold=" +
                     Twine(IC.getCostDelta() + IC.getCost()) + ")");
    return false;
  }

  // Try to detect the case where the current inlining candidate caller (call
  // it B) is a static or linkonce-ODR function and is an inlining candidate
  // elsewhere, and the current candidate callee (call it C) is large enough
  // that inlining it into B would make B too big to inline later. In these
  // circumstances it may be best not to inline C into B, but to inline B into
  // its callers.
  //
  // This only applies to static and linkonce-ODR functions because those are
  // expected to be available for inlining in the translation units where they
  // are used. Thus we will always have the opportunity to make local inlining
  // decisions. Importantly the linkonce-ODR linkage covers inline functions
  // and templates in C++.
  //
  // FIXME: All of this logic should be sunk into getInlineCost. It relies on
  // the internal implementation of the inline cost metrics rather than
  // treating them as truly abstract units etc.
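  //
  // As an illustrative example: if B is a static function whose only other
  // user is a call from D, and inlining C into B would grow B past D's
  // threshold, it can be cheaper overall to skip this inline, let B be
  // inlined into D, and then delete B entirely. The loop below estimates
  // that trade-off.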
  if (Caller->hasLocalLinkage() ||
      Caller->getLinkage() == GlobalValue::LinkOnceODRLinkage) {
    int TotalSecondaryCost = 0;
    // The candidate cost to be imposed upon the current function.
    int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
    // This bool tracks what happens if we do NOT inline C into B.
    bool callerWillBeRemoved = Caller->hasLocalLinkage();
    // This bool tracks what happens if we DO inline C into B.
    bool inliningPreventsSomeOuterInline = false;
    for (User *U : Caller->users()) {
      CallSite CS2(U);

      // If this isn't a call to Caller (it could be some other sort
      // of reference) skip it. Such references will prevent the caller
      // from being removed.
      if (!CS2 || CS2.getCalledFunction() != Caller) {
        callerWillBeRemoved = false;
        continue;
      }

      InlineCost IC2 = getInlineCost(CS2);
      ++NumCallerCallersAnalyzed;
      if (!IC2) {
        callerWillBeRemoved = false;
        continue;
      }
      if (IC2.isAlways())
        continue;

      // See if inlining of the original callsite would erase the cost delta of
      // this callsite. We subtract off the penalty for the call instruction,
      // which we would be deleting.
      if (IC2.getCostDelta() <= CandidateCost) {
        inliningPreventsSomeOuterInline = true;
        TotalSecondaryCost += IC2.getCost();
      }
    }
    // If all outer calls to Caller would get inlined, the cost for the last
    // one is set very low by getInlineCost, in anticipation that Caller will
    // be removed entirely. We did not account for this above unless there
    // is only one caller of Caller.
    if (callerWillBeRemoved && !Caller->use_empty())
      TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;

    if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
      DEBUG(dbgs() << "    NOT Inlining: " << *CS.getInstruction() <<
           " Cost = " << IC.getCost() <<
           ", outer Cost = " << TotalSecondaryCost << '\n');
      emitAnalysis(
          CS, Twine("Not inlining. Cost of inlining " +
                    CS.getCalledFunction()->getName() +
                    " increases the cost of inlining " +
                    CS.getCaller()->getName() + " in other contexts"));
      return false;
    }
  }

  DEBUG(dbgs() << "    Inlining: cost=" << IC.getCost()
               << ", thres=" << (IC.getCostDelta() + IC.getCost())
               << ", Call: " << *CS.getInstruction() << '\n');
  emitAnalysis(
      CS, CS.getCalledFunction()->getName() + Twine(" can be inlined into ") +
              CS.getCaller()->getName() + " with cost=" + Twine(IC.getCost()) +
              " (threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")");
  return true;
}

/// InlineHistoryIncludes - Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
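///
/// History entries form an implicit linked list through their parent indices.
/// As an illustrative example: inlining B into A records (B, -1) as id 0; a
/// call to C discovered in B's body then records (C, 0) as id 1, so walking
/// 1 -> 0 -> -1 shows that a call site tagged with id 1 came through both C
/// and B.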
static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
            const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  DataLayoutPass *DLP = getAnalysisIfAvailable<DataLayoutPass>();
  const DataLayout *DL = DLP ? &DLP->getDataLayout() : nullptr;
  const TargetLibraryInfo *TLI = getAnalysisIfAvailable<TargetLibraryInfo>();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphSCC::iterator I = SCC.begin(), E = SCC.end(); I != E; ++I) {
    Function *F = (*I)->getFunction();
    if (!F) continue;

    for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
        CallSite CS(cast<Value>(I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it. If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (CS.getCalledFunction() && CS.getCalledFunction()->isDeclaration())
          continue;

        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
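  // This is an in-place partition: after the loop, indices [FirstCallInSCC,
  // CallSites.size()) hold the intra-SCC call sites. Each swap re-examines
  // index i, hence the i-- inside the loop body.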
  unsigned FirstCallInSCC = CallSites.size();
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);


  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, DL);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Iterate over the outer loop because inlining functions can cause indirect
    // calls to become direct calls.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), TLI)) {
        DEBUG(dbgs() << "    -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration()) continue;

        // If this call site was obtained by inlining another function, verify
        // that the inline history for it does not include the callee itself.
        // If it did, we would be recursively inlining the same function,
        // which would produce the same call sites over again and cause us to
        // inline infinitely.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;

        LLVMContext &CallerCtx = Caller->getContext();

        // Get the DebugLoc to report; CS will be invalid after inlining.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime, DL)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        emitOptimizationRemark(
            CallerCtx, DEBUG_TYPE, *Caller, DLoc,
            Twine(Callee->getName() + " inlined into " + Caller->getName()));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (unsigned i = 0, e = InlineInfo.InlinedCalls.size();
               i != e; ++i) {
            Value *Ptr = InlineInfo.InlinedCalls[i];
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
          }
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << "    -> Deleting dead function: "
              << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the callee's node from the call graph and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

// doFinalization - Remove now-dead linkonce functions at the end of
// processing to avoid breaking the SCC traversal.
bool Inliner::doFinalization(CallGraph &CG) {
  return removeDeadFunctions(CG);
}

/// removeDeadFunctions - Remove dead functions. If AlwaysInlineOnly is set,
/// only functions marked always-inline are considered for removal; this lets
/// the InlineAlways pass share this code.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones in the FunctionsToRemove set.
  for (CallGraph::iterator I = CG.begin(), E = CG.end(); I != E; ++I) {
    CallGraphNode *CGN = I->second;
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly &&
        !F->getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                         Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the function's node to be removed from the call graph and
    // deleted below.
    FunctionsToRemove.push_back(CGN);
  }
  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that it doesn't matter that we are iterating over a non-stable order
  // here; the order in which the functions are deleted is irrelevant.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (SmallVectorImpl<CallGraphNode *>::iterator I = FunctionsToRemove.begin(),
                                                  E = FunctionsToRemove.end();
       I != E; ++I) {
    delete CG.removeFunctionFromModule(*I);
    ++NumDeleted;
  }
  return true;
}