//===- Inliner.cpp - Code common to all inliners --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the mechanics required to implement inlining without
// missing any calls and updating the call graph. The decisions of which calls
// are profitable to inline are implemented elsewhere.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/InlinerPass.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

#define DEBUG_TYPE "inline"

STATISTIC(NumInlined, "Number of functions inlined");
STATISTIC(NumCallsDeleted, "Number of call sites deleted, not inlined");
STATISTIC(NumDeleted, "Number of functions deleted because all callers found");
STATISTIC(NumMergedAllocas, "Number of allocas merged together");

// This weirdly named statistic tracks the number of times that, when
// attempting to inline a function A into B, we analyze the callers of B to
// see whether inlining B into them would be more profitable and would be
// blocked by this inline step.
STATISTIC(NumCallerCallersAnalyzed, "Number of caller-callers analyzed");

static cl::opt<int>
InlineLimit("inline-threshold", cl::Hidden, cl::init(225), cl::ZeroOrMore,
            cl::desc("Control the amount of inlining to perform (default = 225)"));

static cl::opt<int>
HintThreshold("inlinehint-threshold", cl::Hidden, cl::init(325),
              cl::desc("Threshold for inlining functions with inline hint"));

// We introduce this threshold to help performance of instrumentation-based
// PGO before we actually hook up the inliner with analysis passes such as
// BPI and BFI.
static cl::opt<int>
ColdThreshold("inlinecold-threshold", cl::Hidden, cl::init(225),
              cl::desc("Threshold for inlining functions with cold attribute"));

// Threshold to use when optsize is specified (and there is no
// -inline-threshold given on the command line).
const int OptSizeThreshold = 75;

Inliner::Inliner(char &ID)
    : CallGraphSCCPass(ID), InlineThreshold(InlineLimit), InsertLifetime(true) {}

Inliner::Inliner(char &ID, int Threshold, bool InsertLifetime)
    : CallGraphSCCPass(ID),
      InlineThreshold(InlineLimit.getNumOccurrences() > 0 ? InlineLimit
                                                          : Threshold),
      InsertLifetime(InsertLifetime) {}

/// For this class, we declare that we require and preserve the call graph.
/// If the derived class implements this method, it should
/// always explicitly call the implementation here.
void Inliner::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}


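// Map each array type to the allocas of that type, produced by previously
// inlined call sites, that are available for reuse in the caller.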
typedef DenseMap<ArrayType*, std::vector<AllocaInst*> >
InlinedArrayAllocasTy;

/// \brief If the inlined function had a higher stack protection level than the
/// calling function, then bump up the caller's stack protection level.
static void AdjustCallerSSPLevel(Function *Caller, Function *Callee) {
  // If upgrading the SSP attribute, clear out the old SSP Attributes first.
  // Having multiple SSP attributes doesn't actually hurt, but it adds useless
  // clutter to the IR.
  AttrBuilder B;
  B.addAttribute(Attribute::StackProtect)
      .addAttribute(Attribute::StackProtectStrong)
      .addAttribute(Attribute::StackProtectReq);
  AttributeSet OldSSPAttr = AttributeSet::get(Caller->getContext(),
                                              AttributeSet::FunctionIndex,
                                              B);

  if (Callee->hasFnAttribute(Attribute::SafeStack)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::SafeStack);
  } else if (Callee->hasFnAttribute(Attribute::StackProtectReq) &&
             !Caller->hasFnAttribute(Attribute::SafeStack)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::StackProtectReq);
  } else if (Callee->hasFnAttribute(Attribute::StackProtectStrong) &&
             !Caller->hasFnAttribute(Attribute::SafeStack) &&
             !Caller->hasFnAttribute(Attribute::StackProtectReq)) {
    Caller->removeAttributes(AttributeSet::FunctionIndex, OldSSPAttr);
    Caller->addFnAttr(Attribute::StackProtectStrong);
  } else if (Callee->hasFnAttribute(Attribute::StackProtect) &&
             !Caller->hasFnAttribute(Attribute::SafeStack) &&
             !Caller->hasFnAttribute(Attribute::StackProtectReq) &&
             !Caller->hasFnAttribute(Attribute::StackProtectStrong))
    Caller->addFnAttr(Attribute::StackProtect);
}

/// If it is possible to inline the specified call site,
/// do so and update the CallGraph for this operation.
///
/// This function also does some basic book-keeping to update the IR. The
/// InlinedArrayAllocas map keeps track of any allocas that are already
/// available from other functions inlined into the caller. If we are able to
/// inline this call site, we attempt to reuse already available allocas or add
/// any new allocas to the set if not possible.
static bool InlineCallIfPossible(Pass &P, CallSite CS, InlineFunctionInfo &IFI,
                                 InlinedArrayAllocasTy &InlinedArrayAllocas,
                                 int InlineHistory, bool InsertLifetime) {
  Function *Callee = CS.getCalledFunction();
  Function *Caller = CS.getCaller();

  // We need to manually construct BasicAA directly in order to disable
  // its use of other function analyses.
  BasicAAResult BAR(createLegacyPMBasicAAResult(P, *Callee));

  // Construct our own AA results for this function. We do this manually to
  // work around the limitations of the legacy pass manager.
  AAResults AAR(createLegacyPMAAResults(P, *Callee, BAR));

  // Try to inline the function. Get the list of static allocas that were
  // inlined.
  if (!InlineFunction(CS, IFI, &AAR, InsertLifetime))
    return false;

  AdjustCallerSSPLevel(Caller, Callee);

  // Look at all of the allocas that we inlined through this call site. If we
  // have already inlined other allocas through other calls into this function,
  // then we know that they have disjoint lifetimes and that we can merge them.
  //
  // There are many heuristics possible for merging these allocas, and the
  // different options have different tradeoffs. One thing that we *really*
  // don't want to hurt is SRoA: once inlining happens, often allocas are no
  // longer address taken and so they can be promoted.
  //
  // Our "solution" for that is to only merge allocas whose outermost type is an
  // array type. These are usually not promoted because someone is using a
  // variable index into them. These are also often the most important ones to
  // merge.
  //
  // A better solution would be to have real memory lifetime markers in the IR
  // and not have the inliner do any merging of allocas at all. This would
  // allow the backend to do proper stack slot coloring of all allocas that
  // *actually make it to the backend*, which is really what we want.
  //
  // Because we don't have this information, we do this simple and useful hack.
  //
  SmallPtrSet<AllocaInst*, 16> UsedAllocas;

  // When processing our SCC, check to see if CS was inlined from some other
  // call site. For example, if we're processing "A" in this code:
  //   A() { B() }
  //   B() { x = alloca ... C() }
  //   C() { y = alloca ... }
  // Assume that C was not inlined into B initially, and so we're processing A
  // and decide to inline B into A. Doing this makes an alloca available for
  // reuse and makes a callsite (C) available for inlining. When we process
  // the C call site we don't want to do any alloca merging between x and y
  // because their scopes are not disjoint. We could make this smarter by
  // keeping track of the inline history for each alloca in the
  // InlinedArrayAllocas but this isn't likely to be a significant win.
  if (InlineHistory != -1)  // Only do merging for top-level call sites in SCC.
    return true;

  // Loop over all the allocas we have so far and see if they can be merged with
  // a previously inlined alloca. If not, remember that we had it.
  for (unsigned AllocaNo = 0, e = IFI.StaticAllocas.size();
       AllocaNo != e; ++AllocaNo) {
    AllocaInst *AI = IFI.StaticAllocas[AllocaNo];

    // Don't bother trying to merge array allocations (they will usually be
    // canonicalized to be an allocation *of* an array), or allocations whose
    // type is not itself an array (because we're afraid of pessimizing SRoA).
    ArrayType *ATy = dyn_cast<ArrayType>(AI->getAllocatedType());
    if (!ATy || AI->isArrayAllocation())
      continue;

    // Get the list of all available allocas for this array type.
    std::vector<AllocaInst*> &AllocasForType = InlinedArrayAllocas[ATy];

    // Loop over the allocas in AllocasForType to see if we can reuse one. Note
    // that we have to be careful not to reuse the same "available" alloca for
    // multiple different allocas that we just inlined; we use the 'UsedAllocas'
    // set to keep track of which "available" allocas are being used by this
    // function. Also, AllocasForType can of course be empty!
    bool MergedAwayAlloca = false;
    for (AllocaInst *AvailableAlloca : AllocasForType) {

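      // Record both alloca alignments up front; if AI is merged into
      // AvailableAlloca below, the stricter of the two is propagated.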
      unsigned Align1 = AI->getAlignment(),
               Align2 = AvailableAlloca->getAlignment();

      // The available alloca has to be in the right function, not in some other
      // function in this SCC.
      if (AvailableAlloca->getParent() != AI->getParent())
        continue;

      // If the inlined function already uses this alloca then we can't reuse
      // it.
      if (!UsedAllocas.insert(AvailableAlloca).second)
        continue;

      // Otherwise, we *can* reuse it: RAUW AI into AvailableAlloca and declare
      // success!
      DEBUG(dbgs() << " ***MERGED ALLOCA: " << *AI << "\n\t\tINTO: "
                   << *AvailableAlloca << '\n');

      // Move affected dbg.declare calls immediately after the new alloca to
      // avoid the situation when a dbg.declare precedes its alloca.
      if (auto *L = LocalAsMetadata::getIfExists(AI))
        if (auto *MDV = MetadataAsValue::getIfExists(AI->getContext(), L))
          for (User *U : MDV->users())
            if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(U))
              DDI->moveBefore(AvailableAlloca->getNextNode());

      AI->replaceAllUsesWith(AvailableAlloca);

      if (Align1 != Align2) {
        if (!Align1 || !Align2) {
          const DataLayout &DL = Caller->getParent()->getDataLayout();
          unsigned TypeAlign = DL.getABITypeAlignment(AI->getAllocatedType());

          Align1 = Align1 ? Align1 : TypeAlign;
          Align2 = Align2 ? Align2 : TypeAlign;
        }

        if (Align1 > Align2)
          AvailableAlloca->setAlignment(AI->getAlignment());
      }

      AI->eraseFromParent();
      MergedAwayAlloca = true;
      ++NumMergedAllocas;
      IFI.StaticAllocas[AllocaNo] = nullptr;
      break;
    }

    // If we already nuked the alloca, we're done with it.
    if (MergedAwayAlloca)
      continue;

    // If we were unable to merge away the alloca either because there are no
    // allocas of the right type available or because we reused them all
    // already, remember that this alloca came from an inlined function and mark
    // it used so we don't reuse it for other allocas from this inline
    // operation.
    AllocasForType.push_back(AI);
    UsedAllocas.insert(AI);
  }

  return true;
}

unsigned Inliner::getInlineThreshold(CallSite CS) const {
  int Threshold = InlineThreshold; // -inline-threshold or else selected by
                                   // overall opt level

  // If -inline-threshold is not given, listen to the optsize attribute when it
  // would decrease the threshold.
  Function *Caller = CS.getCaller();
  bool OptSize = Caller && !Caller->isDeclaration() &&
                 // FIXME: Use Function::optForSize().
                 Caller->hasFnAttribute(Attribute::OptimizeForSize);
  if (!(InlineLimit.getNumOccurrences() > 0) && OptSize &&
      OptSizeThreshold < Threshold)
    Threshold = OptSizeThreshold;

  Function *Callee = CS.getCalledFunction();
  if (!Callee || Callee->isDeclaration())
    return Threshold;

  // If profile information is available, use that to adjust the threshold of
  // hot and cold functions.
  // FIXME: The heuristics used below for determining hotness and coldness are
  // based on preliminary SPEC tuning and may not be optimal. Replace this with
  // a well-tuned heuristic based on *callsite* hotness and not callee hotness.
  uint64_t FunctionCount = 0, MaxFunctionCount = 0;
  bool HasPGOCounts = false;
  if (Callee->getEntryCount() &&
      Callee->getParent()->getMaximumFunctionCount()) {
    HasPGOCounts = true;
    FunctionCount = Callee->getEntryCount().getValue();
    MaxFunctionCount =
        Callee->getParent()->getMaximumFunctionCount().getValue();
  }

  // Listen to the inlinehint attribute or profile-based hotness information
  // when it would increase the threshold and the caller does not need to
  // minimize its size.
  bool InlineHint =
      Callee->hasFnAttribute(Attribute::InlineHint) ||
      (HasPGOCounts &&
       FunctionCount >= (uint64_t)(0.3 * (double)MaxFunctionCount));
  if (InlineHint && HintThreshold > Threshold &&
      !Caller->hasFnAttribute(Attribute::MinSize))
    Threshold = HintThreshold;

  // Listen to the cold attribute or profile-based coldness information
  // when it would decrease the threshold.
  bool ColdCallee =
      Callee->hasFnAttribute(Attribute::Cold) ||
      (HasPGOCounts &&
       FunctionCount <= (uint64_t)(0.01 * (double)MaxFunctionCount));
  // A -inline-threshold given on the command line overrides the default
  // ColdThreshold. If we have -inline-threshold but no -inlinecold-threshold,
  // do not use the default cold threshold even if it is smaller.
  if ((InlineLimit.getNumOccurrences() == 0 ||
       ColdThreshold.getNumOccurrences() > 0) && ColdCallee &&
      ColdThreshold < Threshold)
    Threshold = ColdThreshold;

  return Threshold;
}

static void emitAnalysis(CallSite CS, const Twine &Msg) {
  Function *Caller = CS.getCaller();
  LLVMContext &Ctx = Caller->getContext();
  DebugLoc DLoc = CS.getInstruction()->getDebugLoc();
  emitOptimizationRemarkAnalysis(Ctx, DEBUG_TYPE, *Caller, DLoc, Msg);
}

/// Return true if the inliner should attempt to inline at the given CallSite.
bool Inliner::shouldInline(CallSite CS) {
  InlineCost IC = getInlineCost(CS);

  if (IC.isAlways()) {
    DEBUG(dbgs() << " Inlining: cost=always"
                 << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName()) +
                         " should always be inlined (cost=always)");
    return true;
  }

  if (IC.isNever()) {
    DEBUG(dbgs() << " NOT Inlining: cost=never"
                 << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
                           " should never be inlined (cost=never)"));
    return false;
  }

  Function *Caller = CS.getCaller();
  if (!IC) {
    DEBUG(dbgs() << " NOT Inlining: cost=" << IC.getCost()
                 << ", thres=" << (IC.getCostDelta() + IC.getCost())
                 << ", Call: " << *CS.getInstruction() << "\n");
    emitAnalysis(CS, Twine(CS.getCalledFunction()->getName() +
                           " too costly to inline (cost=") +
                     Twine(IC.getCost()) + ", threshold=" +
                     Twine(IC.getCostDelta() + IC.getCost()) + ")");
    return false;
  }

  // Try to detect the case where the current inlining candidate caller (call
  // it B) is a static or linkonce-ODR function and is an inlining candidate
  // elsewhere, and the current candidate callee (call it C) is large enough
  // that inlining it into B would make B too big to inline later. In these
  // circumstances it may be best not to inline C into B, but to inline B into
  // its callers.
  //
  // This only applies to static and linkonce-ODR functions because those are
  // expected to be available for inlining in the translation units where they
  // are used. Thus we will always have the opportunity to make local inlining
  // decisions. Importantly the linkonce-ODR linkage covers inline functions
  // and templates in C++.
  //
  // FIXME: All of this logic should be sunk into getInlineCost. It relies on
  // the internal implementation of the inline cost metrics rather than
  // treating them as truly abstract units etc.
  if (Caller->hasLocalLinkage() || Caller->hasLinkOnceODRLinkage()) {
    int TotalSecondaryCost = 0;
    // The candidate cost to be imposed upon the current function.
    int CandidateCost = IC.getCost() - (InlineConstants::CallPenalty + 1);
    // This bool tracks what happens if we do NOT inline C into B.
    bool callerWillBeRemoved = Caller->hasLocalLinkage();
    // This bool tracks what happens if we DO inline C into B.
    bool inliningPreventsSomeOuterInline = false;
    for (User *U : Caller->users()) {
      CallSite CS2(U);

      // If this isn't a call to Caller (it could be some other sort
      // of reference) skip it. Such references will prevent the caller
      // from being removed.
      if (!CS2 || CS2.getCalledFunction() != Caller) {
        callerWillBeRemoved = false;
        continue;
      }

      InlineCost IC2 = getInlineCost(CS2);
      ++NumCallerCallersAnalyzed;
      if (!IC2) {
        callerWillBeRemoved = false;
        continue;
      }
      if (IC2.isAlways())
        continue;

      // See if inlining the current candidate callee into the caller would use
      // up the cost delta available at this outer call site, blocking that
      // outer inline. We subtract off the penalty for the call instruction,
      // which we would be deleting.
      if (IC2.getCostDelta() <= CandidateCost) {
        inliningPreventsSomeOuterInline = true;
        TotalSecondaryCost += IC2.getCost();
      }
    }
    // If all outer calls to Caller would get inlined, the cost for the last
    // one is set very low by getInlineCost, in anticipation that Caller will
    // be removed entirely. We did not account for this above unless there
    // is only one caller of Caller.
    if (callerWillBeRemoved && !Caller->use_empty())
      TotalSecondaryCost += InlineConstants::LastCallToStaticBonus;

    if (inliningPreventsSomeOuterInline && TotalSecondaryCost < IC.getCost()) {
      DEBUG(dbgs() << " NOT Inlining: " << *CS.getInstruction()
                   << " Cost = " << IC.getCost()
                   << ", outer Cost = " << TotalSecondaryCost << '\n');
      emitAnalysis(
          CS, Twine("Not inlining. Cost of inlining " +
                    CS.getCalledFunction()->getName() +
                    " increases the cost of inlining " +
                    CS.getCaller()->getName() + " in other contexts"));
      return false;
    }
  }

  DEBUG(dbgs() << " Inlining: cost=" << IC.getCost()
               << ", thres=" << (IC.getCostDelta() + IC.getCost())
               << ", Call: " << *CS.getInstruction() << '\n');
  emitAnalysis(
      CS, CS.getCalledFunction()->getName() + Twine(" can be inlined into ") +
          CS.getCaller()->getName() + " with cost=" + Twine(IC.getCost()) +
          " (threshold=" + Twine(IC.getCostDelta() + IC.getCost()) + ")");
  return true;
}

/// Return true if the specified inline history ID
/// indicates an inline history that includes the specified function.
static bool InlineHistoryIncludes(Function *F, int InlineHistoryID,
            const SmallVectorImpl<std::pair<Function*, int> > &InlineHistory) {
  while (InlineHistoryID != -1) {
    assert(unsigned(InlineHistoryID) < InlineHistory.size() &&
           "Invalid inline history ID");
    if (InlineHistory[InlineHistoryID].first == F)
      return true;
    InlineHistoryID = InlineHistory[InlineHistoryID].second;
  }
  return false;
}

bool Inliner::runOnSCC(CallGraphSCC &SCC) {
  CallGraph &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();
  AssumptionCacheTracker *ACT = &getAnalysis<AssumptionCacheTracker>();
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  SmallPtrSet<Function*, 8> SCCFunctions;
  DEBUG(dbgs() << "Inliner visiting SCC:");
  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (F) SCCFunctions.insert(F);
    DEBUG(dbgs() << " " << (F ? F->getName() : "INDIRECTNODE"));
  }

  // Scan through and identify all call sites ahead of time so that we only
  // inline call sites in the original functions, not call sites that result
  // from inlining other functions.
  SmallVector<std::pair<CallSite, int>, 16> CallSites;

  // When inlining a callee produces new call sites, we want to keep track of
  // the fact that they were inlined from the callee. This allows us to avoid
  // infinite inlining in some obscure cases. To represent this, we use an
  // index into the InlineHistory vector.
  SmallVector<std::pair<Function*, int>, 8> InlineHistory;

  for (CallGraphNode *Node : SCC) {
    Function *F = Node->getFunction();
    if (!F) continue;

    for (BasicBlock &BB : *F)
      for (Instruction &I : BB) {
        CallSite CS(cast<Value>(&I));
        // If this isn't a call, or it is a call to an intrinsic, it can
        // never be inlined.
        if (!CS || isa<IntrinsicInst>(I))
          continue;

        // If this is a direct call to an external function, we can never inline
        // it. If it is an indirect call, inlining may resolve it to be a
        // direct call, so we keep it.
        if (Function *Callee = CS.getCalledFunction())
          if (Callee->isDeclaration())
            continue;

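        // A history index of -1 marks a call site that comes directly from the
        // original function rather than from an earlier inline in this walk.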
        CallSites.push_back(std::make_pair(CS, -1));
      }
  }

  DEBUG(dbgs() << ": " << CallSites.size() << " call sites.\n");

  // If there are no calls in this SCC, exit early.
  if (CallSites.empty())
    return false;

  // Now that we have all of the call sites, move the ones to functions in the
  // current SCC to the end of the list.
  unsigned FirstCallInSCC = CallSites.size();
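  // Note that the swap uses 'i--' so that the element moved into slot 'i' is
  // examined again on the next iteration.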
  for (unsigned i = 0; i < FirstCallInSCC; ++i)
    if (Function *F = CallSites[i].first.getCalledFunction())
      if (SCCFunctions.count(F))
        std::swap(CallSites[i--], CallSites[--FirstCallInSCC]);


  InlinedArrayAllocasTy InlinedArrayAllocas;
  InlineFunctionInfo InlineInfo(&CG, ACT);

  // Now that we have all of the call sites, loop over them and inline them if
  // it looks profitable to do so.
  bool Changed = false;
  bool LocalChange;
  do {
    LocalChange = false;
    // Inlining can turn indirect calls into direct calls and append new call
    // sites, so CallSites may grow while we iterate; a range-based for loop
    // cannot be used here.
    for (unsigned CSi = 0; CSi != CallSites.size(); ++CSi) {
      CallSite CS = CallSites[CSi].first;

      Function *Caller = CS.getCaller();
      Function *Callee = CS.getCalledFunction();

      // If this call site is dead and it is to a readonly function, we should
      // just delete the call instead of trying to inline it, regardless of
      // size. This happens because IPSCCP propagates the result out of the
      // call and then we're left with the dead call.
      if (isInstructionTriviallyDead(CS.getInstruction(), &TLI)) {
        DEBUG(dbgs() << " -> Deleting dead call: "
                     << *CS.getInstruction() << "\n");
        // Update the call graph by deleting the edge from Callee to Caller.
        CG[Caller]->removeCallEdgeFor(CS);
        CS.getInstruction()->eraseFromParent();
        ++NumCallsDeleted;
      } else {
        // We can only inline direct calls to non-declarations.
        if (!Callee || Callee->isDeclaration()) continue;

        // If this call site was obtained by inlining another function, verify
        // that the inline path for the call did not include the callee
        // itself. If so, we'd be recursively inlining the same function,
        // which would provide the same callsites, which would cause us to
        // infinitely inline.
        int InlineHistoryID = CallSites[CSi].second;
        if (InlineHistoryID != -1 &&
            InlineHistoryIncludes(Callee, InlineHistoryID, InlineHistory))
          continue;

        LLVMContext &CallerCtx = Caller->getContext();

        // Get the DebugLoc to report; CS will be invalid after inlining.
        DebugLoc DLoc = CS.getInstruction()->getDebugLoc();

        // If the policy determines that we should inline this function,
        // try to do so.
        if (!shouldInline(CS)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }

        // Attempt to inline the function.
        if (!InlineCallIfPossible(*this, CS, InlineInfo, InlinedArrayAllocas,
                                  InlineHistoryID, InsertLifetime)) {
          emitOptimizationRemarkMissed(CallerCtx, DEBUG_TYPE, *Caller, DLoc,
                                       Twine(Callee->getName() +
                                             " will not be inlined into " +
                                             Caller->getName()));
          continue;
        }
        ++NumInlined;

        // Report the inline decision.
        emitOptimizationRemark(
            CallerCtx, DEBUG_TYPE, *Caller, DLoc,
            Twine(Callee->getName() + " inlined into " + Caller->getName()));

        // If inlining this function gave us any new call sites, throw them
        // onto our worklist to process. They are useful inline candidates.
        if (!InlineInfo.InlinedCalls.empty()) {
          // Create a new inline history entry for this, so that we remember
          // that these new callsites came about due to inlining Callee.
          int NewHistoryID = InlineHistory.size();
          InlineHistory.push_back(std::make_pair(Callee, InlineHistoryID));

          for (Value *Ptr : InlineInfo.InlinedCalls)
            CallSites.push_back(std::make_pair(CallSite(Ptr), NewHistoryID));
        }
      }

      // If we inlined or deleted the last possible call site to the function,
      // delete the function body now.
      if (Callee && Callee->use_empty() && Callee->hasLocalLinkage() &&
          // TODO: Can remove if in SCC now.
          !SCCFunctions.count(Callee) &&

          // The function may be apparently dead, but if there are indirect
          // callgraph references to the node, we cannot delete it yet; doing
          // so could invalidate the CGSCC iterator.
          CG[Callee]->getNumReferences() == 0) {
        DEBUG(dbgs() << " -> Deleting dead function: "
                     << Callee->getName() << "\n");
        CallGraphNode *CalleeNode = CG[Callee];

        // Remove any call graph edges from the callee to its callees.
        CalleeNode->removeAllCalledFunctions();

        // Remove the call graph node for the callee and delete it.
        delete CG.removeFunctionFromModule(CalleeNode);
        ++NumDeleted;
      }

      // Remove this call site from the list. If possible, use
      // swap/pop_back for efficiency, but do not use it if doing so would
      // move a call site to a function in this SCC before the
      // 'FirstCallInSCC' barrier.
      if (SCC.isSingular()) {
        CallSites[CSi] = CallSites.back();
        CallSites.pop_back();
      } else {
        CallSites.erase(CallSites.begin()+CSi);
      }
      --CSi;

      Changed = true;
      LocalChange = true;
    }
  } while (LocalChange);

  return Changed;
}

/// Remove now-dead linkonce functions at the end of
/// processing to avoid breaking the SCC traversal.
bool Inliner::doFinalization(CallGraph &CG) {
  return removeDeadFunctions(CG);
}

/// Remove dead functions. If AlwaysInlineOnly is set, only functions marked
/// with the alwaysinline attribute are considered for removal.
bool Inliner::removeDeadFunctions(CallGraph &CG, bool AlwaysInlineOnly) {
  SmallVector<CallGraphNode*, 16> FunctionsToRemove;
  SmallVector<CallGraphNode *, 16> DeadFunctionsInComdats;
  SmallDenseMap<const Comdat *, int, 16> ComdatEntriesAlive;

  auto RemoveCGN = [&](CallGraphNode *CGN) {
    // Remove any call graph edges from the function to its callees.
    CGN->removeAllCalledFunctions();

    // Remove any edges from the external node to the function's call graph
    // node. These edges might have been made irrelevant due to
    // optimization of the program.
    CG.getExternalCallingNode()->removeAnyCallEdgeTo(CGN);

    // Queue the node so its function is removed from the call graph and
    // deleted below.
    FunctionsToRemove.push_back(CGN);
  };

  // Scan for all of the functions, looking for ones that should now be removed
  // from the program. Insert the dead ones in the FunctionsToRemove list.
  for (const auto &I : CG) {
    CallGraphNode *CGN = I.second.get();
    Function *F = CGN->getFunction();
    if (!F || F->isDeclaration())
      continue;

    // Handle the case when this function is called and we only want to care
    // about always-inline functions. This is a bit of a hack to share code
    // between here and the InlineAlways pass.
    if (AlwaysInlineOnly && !F->hasFnAttribute(Attribute::AlwaysInline))
      continue;

    // If the only remaining users of the function are dead constants, remove
    // them.
    F->removeDeadConstantUsers();

    if (!F->isDefTriviallyDead())
      continue;

    // It is unsafe to drop a function with discardable linkage from a COMDAT
    // without also dropping the other members of the COMDAT.
    // The inliner doesn't visit non-function entities which are in COMDAT
    // groups, so it is unsafe to do so *unless* the linkage is local.
    if (!F->hasLocalLinkage()) {
      if (const Comdat *C = F->getComdat()) {
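        // Pre-decrement the group's count; once every member of the group is
        // counted below, an all-dead group nets out to zero.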
        --ComdatEntriesAlive[C];
        DeadFunctionsInComdats.push_back(CGN);
        continue;
      }
    }

    RemoveCGN(CGN);
  }
  if (!DeadFunctionsInComdats.empty()) {
    // Count up all the entities in COMDAT groups
    auto ComdatGroupReferenced = [&](const Comdat *C) {
      auto I = ComdatEntriesAlive.find(C);
      if (I != ComdatEntriesAlive.end())
        ++(I->getSecond());
    };
    for (const Function &F : CG.getModule())
      if (const Comdat *C = F.getComdat())
        ComdatGroupReferenced(C);
    for (const GlobalVariable &GV : CG.getModule().globals())
      if (const Comdat *C = GV.getComdat())
        ComdatGroupReferenced(C);
    for (const GlobalAlias &GA : CG.getModule().aliases())
      if (const Comdat *C = GA.getComdat())
        ComdatGroupReferenced(C);
    for (CallGraphNode *CGN : DeadFunctionsInComdats) {
      Function *F = CGN->getFunction();
      const Comdat *C = F->getComdat();
      int NumAlive = ComdatEntriesAlive[C];
      // We can remove functions in a COMDAT group if the entire group is dead.
      assert(NumAlive >= 0);
      if (NumAlive > 0)
        continue;

      RemoveCGN(CGN);
    }
  }

  if (FunctionsToRemove.empty())
    return false;

  // Now that we know which functions to delete, do so. We didn't want to do
  // this inline, because that would invalidate our CallGraph::iterator
  // objects. :(
  //
  // Note that we are iterating over a non-stable order here; that is fine,
  // because it doesn't matter in which order the functions are deleted.
  array_pod_sort(FunctionsToRemove.begin(), FunctionsToRemove.end());
  FunctionsToRemove.erase(std::unique(FunctionsToRemove.begin(),
                                      FunctionsToRemove.end()),
                          FunctionsToRemove.end());
  for (CallGraphNode *CGN : FunctionsToRemove) {
    delete CG.removeFunctionFromModule(CGN);
    ++NumDeleted;
  }
  return true;
}